import sys
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_iris
# Split a dataset into train/test partitions after a seeded shuffle.
def split_data(data, target, random_seed, ratio):
    """Shuffle (data, target) with the given seed and split by ratio.

    The first `ratio` fraction of the shuffled samples becomes the
    training set; the remainder is the test set.

    Returns:
        (train_X, train_y, test_X, test_y)
    """
    # Seed the global RNG so every call with the same seed reproduces
    # the exact same shuffle.
    np.random.seed(random_seed)
    order = np.random.permutation(target.size)
    shuffled_X, shuffled_y = data[order], target[order]
    cut = int(target.size * ratio)  # index where train ends / test begins
    return shuffled_X[:cut], shuffled_y[:cut], shuffled_X[cut:], shuffled_y[cut:]
# Model evaluation: reshuffle/split the data `times` times, refit the
# classifier on each split, and collect the per-run accuracies so the
# caller can average them into a final score.
def acc_mode(knn, data, target, times, ratio):
    """Score `knn` over `times` independent shuffles of the data.

    Each iteration uses its loop index as the shuffle seed, fits `knn`
    on the training split and scores it on both splits.

    Returns:
        (train_accs, test_accs): two numpy arrays of length `times`.
    """
    train_scores, test_scores = [], []
    for seed in range(times):
        tr_X, tr_y, te_X, te_y = split_data(data, target, random_seed=seed, ratio=ratio)
        knn.fit(tr_X, tr_y)
        train_scores.append(knn.score(tr_X, tr_y))
        test_scores.append(knn.score(te_X, te_y))
    return np.array(train_scores), np.array(test_scores)
# 1. Load the dataset.
data_iris = load_iris()
# 2. Preprocess: pull out the feature matrix, labels and label names.
data = data_iris.data
target = data_iris.target
target_names = data_iris.target_names
print("------------------数据格式:-------------------------------")
print('X形状:', data.shape)
print('y形状:', target.shape)
print('名字:', target_names)
print("------------------分割训练集和测试集:-----------------------")
train_X, train_y, test_X, test_y = split_data(data, target, 10, 0.8)
print('训练集:', train_X.shape, train_y.shape)
print('测试集:', test_X.shape, test_y.shape)
print("------------------模型训练:-------------------------------")
train_all_mean_list = []   # mean train accuracy for each model (k = 1, 3, 5, ...)
test_all_mean_list = []    # mean test accuracy for each model
train_all_k_acc_list = []  # variance of train accuracy for each model
test_all_k_acc_list = []   # variance of test accuracy for each model
k_list = np.arange(1, 21, step=2)
for i in k_list:
    knn = KNeighborsClassifier(n_neighbors=i)  # model under evaluation
    train_acc_list, test_acc_list = acc_mode(knn, data, target, 10, 0.8)
    train_all_mean_list.append(train_acc_list.mean())
    test_all_mean_list.append(test_acc_list.mean())
    # BUG FIX: the variance lists were declared and printed below but
    # never filled, so the script always printed empty lists.
    train_all_k_acc_list.append(train_acc_list.var())
    test_all_k_acc_list.append(test_acc_list.var())
print('训练均值:', train_all_mean_list)
print('训练方差:', train_all_k_acc_list)
print('测试均值:', test_all_mean_list)
print('测试方差:', test_all_k_acc_list)
print("------------------画图分析,数据分析:-------------------------------")
sns.set()
plt.plot(k_list, train_all_mean_list, label='x_train_acc')
plt.plot(k_list, test_all_mean_list, label='x_test_acc')
plt.xticks(k_list)
plt.legend()
plt.show()
# Look at the train/test accuracy gap and prefer a k where it is small.
plt.plot(k_list, np.array(train_all_mean_list) - np.array(test_all_mean_list))
plt.xticks(k_list)
plt.show()
print('通过看折线图,我们发现当K=3或K=15时,模型最好。')
# Final training: refit the chosen model on the complete dataset.
print("------------------再次用全部数据,最终进行训练:-----------------------")
best_model = KNeighborsClassifier(n_neighbors=15)
best_model.fit(data, target)  # all samples, no hold-out
acc = best_model.score(data, target)
print('准确率:', acc)
sys.exit()
# Source article: 机器学习之KNN完整版(代码)
# 最新推荐文章于 2024-05-09 19:20:18 发布