Multi-Label kNN (ML-kNN) Algorithm Implementation (Python 3.6)

The ML-kNN Algorithm

For a new instance, find its k nearest neighbours in the training set, collect the label sets carried by those k instances, and then decide the new instance's own label set by combining prior probabilities with the maximum a posteriori (MAP) rule. For details, see the work on multi-label learning (《多标记学习》) by Zhou Zhihua (周志华) and Zhang Min-Ling (张敏灵).
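
As a brief sketch of that decision rule (notation loosely follows the original ML-kNN paper by Zhang and Zhou; $C_l(x)$ denotes how many of the $k$ nearest neighbours of $x$ carry label $l$):

$$
y_l(x) = \arg\max_{b \in \{0,1\}} P(H_b^l)\, P\big(E_{C_l(x)}^l \mid H_b^l\big)
$$

Here $H_1^l$ / $H_0^l$ are the events that $x$ does / does not have label $l$, and $E_j^l$ is the event that exactly $j$ of the $k$ neighbours have label $l$. $P(H_b^l)$ corresponds to Prior/PriorN and $P(E_j^l \mid H_b^l)$ to Cond/CondN in the code below.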

Algorithm Implementation

Data Preprocessing

MLkNNDemo.py

 

import numpy as np
import scipy.io as sio

import MLkNNTrain
import MLkNNTest

#load the scene dataset (train/test bags and their label matrices)
data = sio.loadmat('scene.mat')
train_bags = data['train_bags']
test_bags = data['test_bags']
train_targets = data['train_targets']
test_targets = data['test_targets']

#train_bags: flatten each 9*15 bag into a 1*135 feature vector
trainBagLine = len(train_bags)
train_data = []
for i in range(trainBagLine):
    linshi = train_bags[i, 0].flatten().tolist()
    train_data.append(linshi)
train_data = np.array(train_data)

#test_bags: flatten each 9*15 bag into a 1*135 feature vector
testBagLine = len(test_bags)
test_data = []
for i in range(testBagLine):
    linshi = test_bags[i, 0].flatten().tolist()
    test_data.append(linshi)
test_data = np.array(test_data)

#constants: Num is the number of neighbours k, Smooth is the Laplace smoothing parameter
Num = 10
Smooth = 1

#training
Prior, PriorN, Cond, CondN = MLkNNTrain.trainClass(train_data, train_targets, Num, Smooth)
#testing
outPuts, preLabels = MLkNNTest.testClass(train_data, train_targets, test_data, test_targets, Num, Prior, PriorN, Cond, CondN)
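
A quick sanity check after the two calls above is the Hamming loss on the test set. A minimal sketch, continuing the demo script (it assumes test_targets marks irrelevant labels with -1, the same coding used for preLabels; if the .mat file uses 0 instead, map it to -1 first):

#evaluation sketch (assumption: test_targets uses +1/-1 like preLabels)
hamming_loss = np.mean(preLabels != test_targets)
print('Hamming loss: %.4f' % hamming_loss)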

Training

MLkNNTrain.py

 

import sys

import numpy as np


#Train: estimate prior and conditional probabilities from the training set
def trainClass(train_data, train_targets, Num, Smooth):
    #Get size of the label matrix: num_class labels, num_training instances
    num_class, num_training = np.mat(train_targets).shape
    #Distance matrix; the diagonal is set to a huge value so that an
    #instance is never selected as its own neighbour
    dist_matrix = np.diagflat(np.ones((1, num_training)) * sys.maxsize)
    #Compute Euclidean distances between all pairs of training instances
    for i in range(num_training - 1):
        vector1 = train_data[i, :]
        for j in range(i + 1, num_training):
            vector2 = train_data[j, :]
            dist_matrix[i, j] = np.sum((vector1 - vector2) ** 2) ** 0.5
            dist_matrix[j, i] = dist_matrix[i, j]

    #Prior and PriorN: smoothed prior probability that a label is (not) relevant
    Prior = np.zeros((num_class, 1))
    PriorN = np.zeros((num_class, 1))
    for i in range(num_class):
        tempCi = np.sum(train_targets[i, :] == np.ones((1, num_training)))
        Prior[i, 0] = (Smooth + tempCi) / (Smooth * 2 + num_training)
        PriorN[i, 0] = 1 - Prior[i, 0]

    #Cond and CondN: probability of seeing t neighbours with a label,
    #given that the instance does (does not) carry that label
    #Sort by distance and take the first Num indices as neighbours
    disMatIndex = np.argsort(dist_matrix)
    tempCi = np.zeros((num_class, Num + 1))
    tempNci = np.zeros((num_class, Num + 1))
    for i in range(num_training):
        temp = np.zeros((1, num_class))
        neighborLabels = []
        for j in range(Num):
            neighborLabels.append(train_targets[:, disMatIndex[i, j]])
        neighborLabels = np.mat(neighborLabels)
        neighborLabels = np.transpose(neighborLabels)
        for j in range(num_class):
            #number of neighbours of instance i that carry label j
            temp[0, j] = np.sum(neighborLabels[j, :] == np.ones((1, Num)))
        for j in range(num_class):
            t = int(temp[0, j])
            if train_targets[j, i] == 1:
                tempCi[j, t] = tempCi[j, t] + 1
            else:
                tempNci[j, t] = tempNci[j, t] + 1

    #normalise the counts into num_class*(Num+1) probability matrices
    Cond = np.zeros((num_class, Num + 1))
    CondN = np.zeros((num_class, Num + 1))
    for i in range(num_class):
        temp1 = np.sum(tempCi[i, :])
        temp2 = np.sum(tempNci[i, :])
        for j in range(Num + 1):
            Cond[i, j] = (Smooth + tempCi[i, j]) / (Smooth * (Num + 1) + temp1)
            CondN[i, j] = (Smooth + tempNci[i, j]) / (Smooth * (Num + 1) + temp2)

    return Prior, PriorN, Cond, CondN
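
The nested distance loop in trainClass is O(n^2) Python-level iteration and becomes slow on larger datasets. A possible alternative, shown only as a sketch (this helper is not part of the original files), is to build the same Euclidean distance matrix with vectorized NumPy operations and then set the diagonal to a large value before argsort, exactly as trainClass does:

#vectorized pairwise Euclidean distances (sketch; X is an (n, d) float array)
import numpy as np

def pairwise_dist(X):
    sq = np.sum(X ** 2, axis=1)                    # squared norm of each row
    d2 = sq[:, None] + sq[None, :] - 2 * X @ X.T   # squared distances
    d2 = np.maximum(d2, 0)                         # guard against small negative values
    return np.sqrt(d2)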


Testing

MLkNNTest.py

 

import numpy as np


#Test: compute posterior outputs and predicted labels for the test set
def testClass(train_data, train_targets, test_data, test_targets, Num, Prior, PriorN, Cond, CondN):
    num_class, num_training = np.mat(train_targets).shape
    num_class, num_testing = np.mat(test_targets).shape

    #distance matrix between every test instance and every training instance
    distMatrix = np.zeros((num_testing, num_training))
    for i in range(num_testing):
        vector1 = test_data[i, :]
        for j in range(num_training):
            vector2 = train_data[j, :]
            distMatrix[i, j] = np.sum((vector1 - vector2) ** 2) ** 0.5

    #Sort by distance and take the first Num indices as neighbours
    disMatIndex = np.argsort(distMatrix)

    #computing outputs
    outPuts = np.zeros((num_class, num_testing))
    for i in range(num_testing):
        temp = np.zeros((1, num_class))
        neighborLabels = []
        for j in range(Num):
            neighborLabels.append(train_targets[:, disMatIndex[i, j]])
        neighborLabels = np.mat(neighborLabels)
        #transpose to num_class * Num
        neighborLabels = np.transpose(neighborLabels)
        for j in range(num_class):
            temp[0, j] = np.sum(neighborLabels[j, :] == np.ones((1, Num)))
        for j in range(num_class):
            t = int(temp[0, j])
            Prob_in = Prior[j, 0] * Cond[j, t]
            Prob_out = PriorN[j, 0] * CondN[j, t]
            if (Prob_in + Prob_out) == 0:
                outPuts[j, i] = Prior[j, 0]
            else:
                outPuts[j, i] = Prob_in / (Prob_in + Prob_out)

    #threshold the posterior to obtain the predicted label matrix
    preLabels = np.zeros((num_class, num_testing))
    for i in range(num_testing):
        for j in range(num_class):
            if outPuts[j, i] >= 0.5:                           #threshold is 0.5
                preLabels[j, i] = 1
            else:
                preLabels[j, i] = -1
    return outPuts, preLabels
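
Besides the hard 0.5 threshold, outPuts[j, i] can be read as the (smoothed) posterior probability that test instance i carries label j, so it can also be used to rank labels. A small usage sketch (this helper is hypothetical, not part of MLkNNTest.py):

#rank the labels of one test instance by posterior probability (sketch)
import numpy as np

def top_labels(outPuts, i):
    return np.argsort(-outPuts[:, i])    # label indices, most probable first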

Note: the above is only a personal note taken while studying; corrections are welcome if anything is inaccurate.

 

 
