# Environment: Python 3.5
# Note: some comments left over from the debugging process are kept in the code.
# Key point: how Python lists and NumPy arrays are used together.
from numpy import *
import operator
import matplotlib.pyplot as plt
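# Illustrative helper (not part of the classifier itself): a quick look at the two
# NumPy calls the kNN code below relies on. tile() stacks a point into repeated
# rows, and argsort() returns the indices that would sort an array from smallest
# to largest.
def demoNumpyBasics():
    point = [1.0, 1.1]                          # a plain Python list works as input
    print(tile(point, (3, 1)))                  # 3x2 array: the point repeated 3 times
    print(array([0.5, 0.1, 0.3]).argsort())     # -> [1 2 0]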
# Define function to generate the sample group and labels
def createDataSet():
    group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
    labels = ['A', 'A', 'B', 'B']
    return group, labels
# Define function to classify the input vector inX against the training dataSet
def classify0(inX, dataSet, labels, k):
    # Distance calculation (Euclidean)
    dataSetSize = dataSet.shape[0]
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat ** 2
    sqDistances = sqDiffMat.sum(axis=1)  # sum across each row
    distances = sqDistances ** 0.5
    # argsort returns the indices that would sort the distances from smallest to largest
    sortedDistIndicies = distances.argsort()
    print(len(sortedDistIndicies))   # debug: number of training samples
    print("the k is %s" % k)         # debug
    # print("the labels is %s" % labels)
    # Voting with the k lowest distances
    classCount = {}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    # Sort the vote counts in descending order and return the winning label
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
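# Illustrative sanity check (added for demonstration): with the toy data from
# createDataSet, the point [0, 0] sits next to the two 'B' samples, so a 3-NN
# vote should come back with 'B'.
def demoClassify0():
    group, labels = createDataSet()
    print(classify0([0, 0], group, labels, 3))  # expected: 'B'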
# Define function to parse a tab-delimited file into a feature matrix and a label list
def file2matrix(filename, dim2):
    fr = open(filename)
    lines = fr.readlines()
    fr.close()
    # Get number of lines in file
    numberOfLines = len(lines)
    # Create NumPy matrix to return
    returnMat = zeros((numberOfLines, dim2))
    classLabelVector = []
    index = 0
    # Parse each line into a list
    for line in lines:
        line = line.strip()
        listFromLine = line.split('\t')
        returnMat[index, :] = listFromLine[0:dim2]
        classLabelVector.append(str(listFromLine[-1]))
        index += 1
        if index == 1:
            print(listFromLine)  # debug: show the first parsed line
    return returnMat, classLabelVector
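# Illustrative example call (assumes the dating data set used elsewhere in this
# file is available at data/datingTestSet2.txt): the first three tab-separated
# columns become the feature matrix, the last column becomes the label list.
def demoFile2matrix():
    mat, vec = file2matrix('data/datingTestSet2.txt', 3)
    print(mat.shape)   # (number of samples, 3)
    print(vec[0:5])    # first few labels, stored as strings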
# Define function to normalize each feature column to the [0, 1] range
def autoNorm(dataSet):
    minVals = dataSet.min(0)   # column-wise minimum
    maxVals = dataSet.max(0)   # column-wise maximum
    ranges = maxVals - minVals
    m = dataSet.shape[0]
    normDataSet = dataSet - tile(minVals, (m, 1))
    normDataSet = normDataSet / tile(ranges, (m, 1))  # element-wise division
    return normDataSet, ranges, minVals
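# Illustrative sketch of autoNorm on a tiny hand-made array: each feature column
# is rescaled to [0, 1] via (value - min) / (max - min).
def demoAutoNorm():
    raw = array([[10.0, 400.0], [20.0, 800.0], [30.0, 600.0]])
    normed, ranges, minVals = autoNorm(raw)
    print(normed)    # columns become [0.0, 0.5, 1.0] and [0.0, 1.0, 0.5]
    print(ranges)    # [ 20. 400.]
    print(minVals)   # [ 10. 400.]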
# Classifier testing code for the dating site data
def datingClassTest():
    hoRatio = 0.10   # hold out 10% of the data for testing
    datingDataMat, datingLabels = file2matrix('datingTestSet.txt', 3)
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    numTestVecs = int(m * hoRatio)
    errorCount = 0.0
    for i in range(numTestVecs):
        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 3)
        print("the classifier came back with: %s, the real answer is: %s" % (classifierResult, datingLabels[i]))
        if classifierResult != datingLabels[i]:
            errorCount += 1.0
    print("the total error rate is: %f" % (errorCount / float(numTestVecs)))
# Dating site predictor function
def classifyPerson():
    resultList = ['not at all', 'in small doses', 'in large doses']
    percentTats = float(input("percentage of time spent playing video games?"))
    ffMiles = float(input("frequent flier miles earned per year?"))
    iceCream = float(input("liters of ice cream consumed per year?"))
    datingDataMat, datingLabels = file2matrix('data/datingTestSet2.txt', 3)
    # print("the datingLabels is %s" % datingLabels)
    normMat, ranges, minVals = autoNorm(datingDataMat)
    inArr = array([ffMiles, percentTats, iceCream])
    classifierResult = classify0((inArr - minVals) / ranges, normMat, datingLabels, 3)
    # print(classifierResult)
    print("You will probably like this person:", resultList[int(classifierResult) - 1])
if __name__ == "__main__":
    datingDataMat, datingLabels = file2matrix('data/datingTestSet2.txt', 3)
    fig = plt.figure()
    ax = fig.add_subplot(211)
    labels = array(datingLabels)
    labels = labels.astype('float64')  # scatter needs numeric values for marker size/color scaling
    classifyPerson()
    # plt.title("Ice cream and video games")
    # plt.xlabel("Percentage of Time Spent Playing Video Games")
    # plt.ylabel("Liters of Ice Cream Consumed Per Week")
    # ax.scatter(datingDataMat[:, 1], datingDataMat[:, 2], 15.0 * labels, 15.0 * labels)
    # ax = fig.add_subplot(212)
    # plt.title("Video games and frequent flyer miles")
    # plt.xlabel("Frequent Flyer Miles Earned Per Year")
    # plt.ylabel("Percentage of Time Spent Playing Video Games")
    # ax.scatter(datingDataMat[:, 0], datingDataMat[:, 1], 15.0 * labels, 15.0 * labels)
    # plt.show()