# a.r -- script header
# NOTE(review): clearing the global environment from inside a script is
# discouraged (it destroys the caller's workspace); prefer running the
# script in a fresh R session instead.
rm(list = ls())
library(MASS)
# w.r -- weight function (?)
# NOTE(review): this fragment is garbled by extraction -- the function
# header (presumably `w <- function(...) {`) and all assignments are
# missing; only bare names and an if/else skeleton remain.  `maxd` and
# `f` are not defined anywhere in the visible file -- recover the
# original source before using this.
w
if(maxd!=0){
f
}else{
f
}
f
}
# ^ NOTE(review): unmatched closing brace -- it presumably closed the
#   lost `w <- function(...) {` header above.
# dsr.r -- appears to compute the Euclidean norm of a distance vector
# NOTE(review): garbled by extraction; only the loop accumulating squared
# entries of `dis` survives.  Presumably the missing lines were a function
# header `dsr <- function(dis) {`, `l <- length(dis)`, `sum <- 0`, and a
# final `norm <- sqrt(sum)` -- TODO confirm against the original source.
dsr
l
norm
sum
for(i in 1:l){
sum=sum+(dis[i])^2   # accumulate squared components of `dis`
}
norm
norm
}
# d.r -- distance computation (?)
# NOTE(review): garbled by extraction; all assignments are gone and only
# bare variable names remain.  From the names (`dis.ma`, `dist.list`,
# `sorted.list`) this presumably built a distance matrix between a test
# point and training points and returned the sorted distances -- TODO
# recover the original source.
d
l
te
te.ma
dis.ma
dist.list
sorted.list
sorted.list
}
# cal_w.r -- per-class weight calculation for distance-weighted k-NN (?)
# NOTE(review): garbled by extraction; the assignments driving both loops
# are missing.  From the surviving names (`dists`, `ind`, `max.dist`,
# `largest.weight.list`) this presumably weighted the k nearest
# neighbours by distance per candidate class and returned the winning
# class -- TODO confirm against the original source.
cal_w
dists
ind
r.c
c.ma
max.dist
for(i in 1:length(ind)){
r.c[i]
}
u.c
length(c.ma)
for(i in 1:length(u.c)){
temp
c.ma[[i]]
}
largest.weight.list
ans
ans
}
# knn.dist.r -- distance-weighted k-NN classifier (?)
# NOTE(review): garbled by extraction; the function header and loop-body
# assignments are missing.  The call site later in this file,
# `knn.dist(trdata[-s,], trdata[s,], trc[-s], k)`, suggests the signature
# was (training data, test data, training labels, k) and that it returned
# `ncl`, one predicted class per test row -- TODO confirm.
knn.dist
weight
ncl
te
for(i in 1:te){
r.list
ncl[i]
}
ncl
}
以下代码可用于实现 k 折交叉验证：
# Classify the training set with the decision function `trf`:
# class 1 if trf(x) > 0, class 2 if trf(x) < 0.
# NOTE(review): `trf` and `trdata` must be defined before this point;
# columns of `trdata` are assumed to be observations here (it is
# transposed to row-observations below for knn.dist) -- TODO confirm.
n_train <- 10000
cls <- numeric(n_train)  # avoid shadowing base::c with `c`
for (i in seq_len(n_train)) {
  if (trf(trdata[, i]) > 0) {
    cls[i] <- 1
  } else {
    cls[i] <- 2
  }
}
cls  # class indicator
trc <- cls              # training labels, used by the cross-validation below
trdata <- t(trdata)     # transpose so rows are observations
# Holdout validation over odd k values 3, 7, ..., 39.
# NOTE(review): the original comment said "2-fold cross-validation", but
# this is a single 50/50 holdout split per k, resampled each iteration --
# the two halves are never swapped, so it is not true 2-fold CV.
# NOTE(review): `q3berror` below is a bare name -- the line computing it
# (presumably the error rate of `ncl` against the held-out labels
# `trc[s]`) appears to have been lost in extraction, and the loop
# counter `i` is incremented but never used.
i=1
for(k in c(3,7,11,15,19,23,27,31,35,39))
{kfold=floor(10000/2)
s=sample(1:10000,kfold)
ncl=knn.dist(trdata[-s,],trdata[s,],trc[-s],k)
q3berror
cat("k-nearest neighbour ",k," result:",ncl," error:",q3berror,"\n")
i=i+1
}
从最后的交叉验证结果来看，错误率还是令人满意的，说明该模型的效果较好，也省却了参数优化的过程。有问题欢迎讨论交流，QQ：570881451。