import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits#数字数据集
np.random.seed(123)

# Load the handwritten-digits dataset (8x8 grayscale images flattened to 64 features)
digits = load_digits()
x, y = digits.data, digits.target
x_train, x_test, y_train, y_test = train_test_split(x, y)

print('Shape of x_train: ', x_train.shape)
print('Shape of y_train: ', y_train.shape)
print('Shape of x_test: ', x_test.shape)
print('Shape of y_test: ', y_test.shape)

# Preview the first ten digits on a 2x5 grid.
fig = plt.figure(figsize=(10, 8))
for idx in range(10):
    ax = fig.add_subplot(2, 5, idx + 1)  # sub-plot idx+1 of the 2x5 grid
    plt.imshow(x[idx].reshape((8, 8)), cmap='gray')
plt.show()
class KNN:
    """Minimal k-nearest-neighbours classifier using Euclidean distance.

    A lazy learner: ``fit`` only memorises the training set; all work
    happens in ``predict``.
    """

    def __init__(self):
        pass

    def fit(self, x, y):
        """Store the training examples ``x`` and their labels ``y``."""
        self.data = x
        self.targets = y

    def euclidean_distance(self, x):
        """
        Computes the euclidean distance between the training data and
        a new input example or matrix of input examples X.

        Returns a 1-D array of distances for a single example, or a
        (n_samples, n_train) array for a matrix of examples.
        """
        if x.ndim == 1:  # single example
            a = np.sqrt(np.sum((self.data - x) ** 2, axis=1))
        if x.ndim == 2:  # matrix of examples
            n_samples, _ = x.shape
            a = [np.sqrt(np.sum((self.data - x[i]) ** 2, axis=1))
                 for i in range(n_samples)]
        return np.array(a)

    def predict(self, x, k=1):
        """Predict label(s) for ``x`` by majority vote among the ``k``
        nearest training points.

        For a 1-D input returns a single label. For a 2-D input returns
        the (1, n_samples) transposed label array when ``k == 1`` (callers
        index ``[0]``), otherwise a list of majority-vote labels.
        """
        # step 1: compute distance between input and training data
        dists = self.euclidean_distance(x)
        # step 2: find the k nearest neighbors and their classifications
        if x.ndim == 1:  # single example
            if k == 1:
                # BUG FIX: was np.argmin(digits) — must rank the computed
                # distances, not the global dataset object.
                nn = np.argmin(dists)  # index of the nearest training point
                return self.targets[nn]
            else:
                # BUG FIX: was np.argsort(digits) — same mistake as above.
                # argsort returns indices ordered by increasing distance.
                knn = np.argsort(dists)[:k]
                y_knn = self.targets[knn]
                # Majority vote among the k neighbour labels.
                max_vote = max(y_knn, key=list(y_knn).count)
                return max_vote
        if x.ndim == 2:  # matrix of examples
            knn = np.argsort(dists)[:, :k]
            y_knn = self.targets[knn]
            if k == 1:
                return y_knn.T  # shape (1, n_samples); callers index [0]
            else:
                n_samples, _ = x.shape
                max_vote = [max(y_knn[i], key=list(y_knn[i]).count)
                            for i in range(n_samples)]
                return max_vote
# Initialise and train
knn = KNN()
knn.fit(x_train, y_train)

# Spot-check single datapoints, then a small batch, at various k.
for idx, k in ((0, 1), (20, 5)):
    print(f'Testing one datapoint, k={k}')
    print('Predicted label:', knn.predict(x_test[idx], k=k))
    print('True label: ', y_test[idx])
    print()

for k in (1, 4):
    print(f'Testing 10 datapoint, k={k}')
    print('Predicted label:', knn.predict(x_test[5:15], k=k))
    print('True label: ', y_test[5:15])
    print()

# Evaluate on the full test set
y_p_test1 = knn.predict(x_test, k=1)
# predict() returns shape (1, n_samples) for k=1, hence the [0] indexing
test_acc1 = np.sum(y_p_test1[0] == y_test) / len(y_p_test1[0]) * 100
print('Test accuracy with k=1: ', test_acc1)

y_p_test8 = knn.predict(x_test, k=8)
test_acc8 = np.sum(y_p_test8 == y_test) / len(y_p_test8) * 100
print('Test accuracy with k=8: ', test_acc8)
# (2) Directly calling sklearn's API
from sklearn.neighbors import KNeighborsClassifier  # k-nearest-neighbours classifier
from sklearn.neighbors import KNeighborsRegressor

# Fit the sklearn classifier on the full digits data.
module = KNeighborsClassifier(n_neighbors=6)
module.fit(x, y)
# BUG FIX: `test` was undefined — predict on the held-out test split.
# Also keep class predictions and class probabilities in separate names:
# the second assignment used to silently overwrite the first.
predicted = module.predict(x_test)            # hard class labels
predicted_proba = module.predict_proba(x_test)  # per-class probabilities
# Full standalone script:
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.neighbors import KNeighborsClassifier

np.random.seed(123)
# Synthetic two-cluster dataset: 1000 points, 2 features, binary labels.
x, y = make_blobs(n_samples=1000, centers=2)

# Visualise the dataset
fig = plt.figure(figsize=(8, 6))
plt.scatter(x[:, 0], x[:, 1], c=y)
plt.title('Dataset')
plt.xlabel('First feature')
plt.ylabel('Second feature')
plt.show()

y_true = y[:, np.newaxis]  # labels as a column vector, shape (n, 1)
x_train, x_test, y_train, y_test = train_test_split(x, y_true)
print('Shape of x_train: ', x_train.shape)
print('Shape of y_train: ', y_train.shape)
print('Shape of x_test: ', x_test.shape)
print('Shape of y_test: ', y_test.shape)

module = KNeighborsClassifier()
# BUG FIX: the model was fitted on the TEST split (module.fit(x_test, y_test)),
# which leaks the test data and makes the evaluation meaningless. Fit on the
# training split; .ravel() flattens the (n, 1) labels to the (n,) shape
# sklearn expects.
module.fit(x_train, y_train.ravel())

# Evaluate
y_p_train = module.predict(x_train)
y_p_test = module.predict(x_test)
# BUG FIX: predictions are shape (n,) but y_train/y_test are (n, 1); the
# subtraction broadcast to an (n, n) matrix, giving a bogus accuracy. Compare
# flat arrays, and scale the mean error rate (0..1) to percent before
# subtracting from 100.
print('Train accuracy: ',
      (100 - 100 * np.mean(np.abs(y_p_train - y_train.ravel()))), '%')
print('Test accuracy: ',
      (100 - 100 * np.mean(np.abs(y_p_test - y_test.ravel()))), '%')