Cleaning face sample-pair images by similarity with MXNet

Cleaning perspectives:

1. Find the data (prob images) least similar to their own ID (gallery) image (possible labeling errors, data from different IDs mixed together, etc.).
2. Find the data most similar to some other ID (possible labeling errors, duplicated copies, the same person enrolled multiple times, etc.).
3. Find the most similar ID-to-ID pairs, again to rule out duplicated data or repeated enrollment.

The code below focuses on angles 1 and 3; in practice angle 2 also deserves close attention (a brief sketch for angle 2 is included before the angle-3 section).

Angle 1

Background: each ID has one ID (gallery) image and several prob images. ID images are named like AA20210501S0338.jpg; prob images are named like AA20210501S0338_m001.jpg.
Approach: load the model, extract features, compute the average similarity between each ID image and its prob images, sort by that average, and save the result.

import numpy as np
import os
import mxnet as mx
from menpo.visualize import print_progress
from collections import namedtuple

Batch = namedtuple('Batch', ['data'])

def constructmodel(prefix, ctx, epoch):
    # Load a checkpoint and keep only the embedding layer (fc1) as the output.
    sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
    all_layers = sym.get_internals()
    sym = all_layers['fc1_output']
    model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
    model.bind(for_training=False, data_shapes=[('data', (1, 3, 112, 112))])
    model.set_params(arg_params, aux_params)
    return model

def extractfeature(model, img, featurelength=512):
    # Forward a single preprocessed image and return its embedding as a column vector.
    model.forward(Batch([img]))
    net_out = model.get_outputs()
    emb = net_out[0].asnumpy()
    emb = emb.reshape((featurelength, 1))
    return emb

def id_getfeatures_dict(model, imglist, image_root="", featurelength=512):
    # Extract one embedding per ID (gallery) image, keyed by the file name without extension.
    feature_dict = {}
    with open(imglist, 'r') as file:
        lines = file.readlines()
        for line in print_progress(lines):
            vec = line.strip().split('\t')
            imagepath = vec[0]
            key = os.path.splitext(imagepath)[0]          # e.g. AA20210501S0338
            ori_img = mx.image.imread(os.path.join(image_root, imagepath))
            oriimg = ori_img.astype('float32')
            oriimg = mx.image.imresize(oriimg, 112, 112)  # resize to the model input size
            oriimg = oriimg.transpose((2, 0, 1))          # channel first
            oriimg = oriimg.expand_dims(axis=0)           # add the batch dimension
            emb = extractfeature(model, oriimg, featurelength)
            feature_dict.setdefault(key, []).append(emb)
    return feature_dict


def prob_getfeatures_dict(model, imglist, image_root="", featurelength=512):
    # Extract embeddings for prob images, grouped under their ID key
    # (everything before "_m", so AA20210501S0338_m001.jpg -> AA20210501S0338).
    feature_dict = {}
    with open(imglist, 'r') as file:
        lines = file.readlines()
        for line in print_progress(lines):
            vec = line.strip().split('\t')
            imagepath = vec[0]
            key = imagepath.split('_m')[0]
            ori_img = mx.image.imread(os.path.join(image_root, imagepath))
            oriimg = ori_img.astype('float32')
            oriimg = mx.image.imresize(oriimg, 112, 112)  # resize to the model input size
            oriimg = oriimg.transpose((2, 0, 1))          # channel first
            oriimg = oriimg.expand_dims(axis=0)           # add the batch dimension
            emb = extractfeature(model, oriimg, featurelength)
            feature_dict.setdefault(key, []).append(emb)
    return feature_dict


def cal_sim(id_fdict, prob_fdict):
    # For every ID, average the cosine similarity between its ID image and each of
    # its prob images, then write the IDs sorted from least to most similar.
    assert len(id_fdict.keys()) == len(prob_fdict.keys())
    sim_dict = {}
    sim_num_dict = {}
    with open('../id_prob_sims.txt', 'w') as f:
        for k in print_progress(id_fdict.keys()):
            idfea = id_fdict[k]
            sims = []
            for probfea in prob_fdict[k]:
                s = np.dot(np.squeeze(idfea), np.squeeze(probfea)) / (np.linalg.norm(idfea) * np.linalg.norm(probfea))
                sims.append(s)
            sim_dict[k] = sum(sims) / len(sims)   # average similarity of this ID
            sim_num_dict[k] = len(sims)           # how many prob images it has
        sim_tuple = sorted(sim_dict.items(), key=lambda item: item[1])
        for k in sim_tuple:
            f.write('%s\t%s\t%s\n' % (k[0], sim_num_dict[k[0]], k[1]))

modelPth = '/home/model,368'      # "<checkpoint prefix>,<epoch>"
prefix, epoch = modelPth.split(',')
ctx = [mx.gpu(0)]
featurelen = 512
id_img_list = '../clean_id_list.txt'        # one ID image path per line (tab-separated columns, path first)
prob_img_list = '../clean_prob_list.txt'    # one prob image path per line
ID_path = '../ID_img_aligned'
prob_path = '../prob_img_aligned_clean'
model = constructmodel(prefix, ctx, int(epoch))
id_features_dict = id_getfeatures_dict(model, id_img_list, ID_path, featurelen)
prob_features_dict = prob_getfeatures_dict(model, prob_img_list, prob_path, featurelen)
cal_sim(id_features_dict, prob_features_dict)

Note: use a model with strong recognition performance for feature extraction.
IDs with a low average similarity need a manual check of whether the ID image and its prob images really belong to the same person.
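As a rough starting point for that manual pass, the sketch below (my addition, not part of the original post; the review count N is an arbitrary choice) simply prints the N entries with the lowest average similarity from the file written by cal_sim:

# Review helper: list the IDs with the lowest ID-vs-prob average similarity.
# id_prob_sims.txt is written above as "<id_key>\t<num_probs>\t<avg_sim>", sorted ascending.
N = 50  # number of IDs to inspect manually; an arbitrary choice
with open('../id_prob_sims.txt', 'r') as f:
    for i, line in enumerate(f):
        if i >= N:
            break
        id_key, num_probs, avg_sim = line.strip().split('\t')
        print('check ID %s: %s prob images, average similarity %s' % (id_key, num_probs, avg_sim))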

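Angle 2

The original code does not implement angle 2, so the following is only a minimal sketch under the same data layout: it reuses id_getfeatures_dict and prob_getfeatures_dict from angle 1 and, for every prob image, finds the most similar foreign ID image. The function name cal_cross_sim and the output file name are my own choices.

def cal_cross_sim(id_fdict, prob_fdict, outfile='../prob_other_id_sims.txt'):
    # For each prob embedding, find the most similar ID image that does NOT belong
    # to its own ID. High scores are candidates for label errors or for the same
    # person being enrolled under two different IDs.
    def cos(a, b):
        a, b = np.squeeze(a), np.squeeze(b)
        return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
    results = []
    for k in print_progress(prob_fdict.keys()):
        for i, probfea in enumerate(prob_fdict[k]):
            best_id, best_sim = None, -1.0
            for other_k, idfeas in id_fdict.items():
                if other_k == k:
                    continue
                s = cos(idfeas[0], probfea)
                if s > best_sim:
                    best_id, best_sim = other_k, s
            # i is the index of the prob image within its ID's list
            results.append((k, i, best_id, best_sim))
    results.sort(key=lambda r: r[3], reverse=True)  # most suspicious pairs first
    with open(outfile, 'w') as f:
        for k, i, best_id, best_sim in results:
            f.write('%s\t%d\t%s\t%s\n' % (k, i, best_id, best_sim))

This brute-force loop is O(number of prob images x number of IDs); for a large dataset the ID features should be stacked into one matrix and compared with a single matrix multiplication instead.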
Angle 3

# the other functions are the same as in angle 1

import itertools

def cal_sim(id_fdict, prob_fdict):
    # Compare every ID (gallery) image against every other ID image; the most similar
    # pairs at the end of the output file are candidates for duplicated IDs.
    assert len(id_fdict.keys()) == len(prob_fdict.keys()), '%s-%s' % (len(id_fdict.keys()), len(prob_fdict.keys()))
    sim_dict = {}
    id_combins = list(itertools.combinations(id_fdict.keys(), 2))
    for (idk1, idk2) in print_progress(id_combins):
        idfea = id_fdict[idk1]
        probfea = id_fdict[idk2]
        s = np.dot(np.squeeze(idfea), np.squeeze(probfea)) / (np.linalg.norm(idfea) * np.linalg.norm(probfea))
        sim_dict['%s\t%s' % (idk1, idk2)] = s
    sim_tuple = sorted(sim_dict.items(), key=lambda item: item[1])
    with open('../id_inter_sims.txt', 'w') as f:
        for k in sim_tuple:
            k0, k1 = k[0].split('\t')
            f.write('%s\t%s\t%s\n' % (k0, k1, k[1]))


modelPth = '/home/model,368'      # "<checkpoint prefix>,<epoch>"
prefix, epoch = modelPth.split(',')
ctx = [mx.gpu(0)]
featurelen = 512
id_img_list = '../clean_id_list.txt'
ID_path = '../ID_img_aligned'
model = constructmodel(prefix, ctx, int(epoch))
id_features_dict = id_getfeatures_dict(model, id_img_list, ID_path, featurelen)
cal_sim(id_features_dict, id_features_dict)   # compare the ID features against themselves
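The pairs at the end of id_inter_sims.txt (highest similarity) should be reviewed as possible duplicates; a minimal review sketch is below. The 0.5 threshold is an arbitrary assumption and should be tuned against the genuine/impostor score distribution of your model.

# Flag ID pairs whose gallery images are suspiciously similar (possible duplicate IDs).
THRESH = 0.5  # arbitrary threshold; tune it against your model's score distribution
with open('../id_inter_sims.txt', 'r') as f:
    for line in f:
        id1, id2, sim = line.strip().split('\t')
        if float(sim) >= THRESH:
            print('possible duplicate IDs: %s <-> %s (similarity %s)' % (id1, id2, sim))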

Original post: https://blog.csdn.net/qxqxqzzz/article/details/118091009
