Machine Learning in Action: Using the k-Nearest Neighbors Algorithm on a Dating Site

from numpy import *
import operator
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

def autoNorm(dataSet):
    minVals = dataSet.min(0)                     # column-wise minimum of each feature
    maxVals = dataSet.max(0)                     # column-wise maximum of each feature
    ranges = maxVals - minVals
    normDataSet = zeros(shape(dataSet))
    m = dataSet.shape[0]                         # number of rows (samples)
    normDataSet = dataSet - tile(minVals, (m, 1))
    normDataSet = normDataSet / tile(ranges, (m, 1))   # element-wise divide
    return normDataSet, ranges, minVals

def classify0(inX, dataSet, labels, k):          # core k-nearest-neighbors implementation
    dataSetSize = dataSet.shape[0]
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat ** 2
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances ** 0.5               # Euclidean distance to every training sample
    sortedDistIndicies = distances.argsort()
    classCount = {}
    for i in range(k):                           # vote among the k nearest neighbors
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]

def file2matrix(filename):                       # parse the tab-separated data file
    with open(filename) as fr:
        arrayOLines = fr.readlines()
    numberOfLines = len(arrayOLines)             # number of lines in the file
    returnMat = zeros((numberOfLines, 3))        # matrix holding the three features
    classLabelVector = []                        # class labels
    index = 0
    for line in arrayOLines:
        line = line.strip()
        listFromLine = line.split('\t')
        returnMat[index, :] = listFromLine[0:3]
        classLabelVector.append(int(listFromLine[-1]))
        index += 1
    return returnMat, classLabelVector

def datingClassTest():                           # evaluate the classifier on a hold-out split
    hoRatio = 0.50                               # hold out 50% of the data for testing
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')   # load data set from file
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    numTestVecs = int(m * hoRatio)
    errorCount = 0.0
    for i in range(numTestVecs):
        # classify the i-th held-out row against the remaining rows
        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 3)
        print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, datingLabels[i]))
        if classifierResult != datingLabels[i]:
            errorCount += 1.0
    print("the total error rate is: %f" % (errorCount / float(numTestVecs)))
    print(errorCount)

# Repeat the experiment with scikit-learn's KNeighborsClassifier for comparison
datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
normMat, ranges, minVals = autoNorm(datingDataMat)
X_train, X_test, y_train, y_test = train_test_split(normMat, datingLabels, random_state=0)
clf = KNeighborsClassifier(n_neighbors=3)
clf.fit(X_train, y_train)
y_pred = clf.predict([normMat[0]])               # predict on the normalized features, not the raw row
print(y_pred)
print(clf.score(X_test, y_test))
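As a minimal sketch of how the pieces above fit together, the snippet below scores one new, made-up sample with both the book-style classify0 and the scikit-learn model. It assumes the three features are the ones used in Machine Learning in Action (frequent flyer miles per year, percentage of time playing video games, liters of ice cream per week), and it reuses ranges, minVals, normMat, datingLabels, and clf defined in the script above; the concrete feature values are invented for illustration.

# Sketch: classify a hypothetical new person with both classifiers (continuation of the script above)
newPerson = array([40000, 10.0, 0.5])                   # example values: miles, % gaming time, ice cream
normPerson = (newPerson - minVals) / ranges              # apply the same min-max scaling as the training data
print(classify0(normPerson, normMat, datingLabels, 3))   # book-style kNN
print(clf.predict([normPerson]))                         # scikit-learn kNN trained above

Whichever classifier is used, the new input must go through the same min-max normalization as the training data; otherwise the large-valued frequent-flyer-miles feature dominates the Euclidean distance.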