# Partially load model parameters
def smart_partial_load_model_state_dict(model, state_dict):
    """Load ``state_dict`` into ``model``, keeping only the keys both sides share.

    Handles two common checkpoint/model mismatches:
    - the checkpoint carries extra keys the model does not need (e.g.
      downstream task heads) -> skipped and reported;
    - keys differ only by the DataParallel ``'module.'`` prefix -> the
      prefix is added or stripped so the key matches.

    Keys present in the model but absent from the checkpoint keep the
    model's current values. Matched / unmatched / untouched key sets are
    printed for inspection.
    """
    # Snapshot once: model.state_dict() rebuilds a fresh OrderedDict on every
    # call, so the original per-key membership tests were needlessly expensive.
    model_state = model.state_dict()
    parsed_state_dict = {}
    non_match_keys = []
    pretrained_keys = []
    for k, v in state_dict.items():
        if k not in model_state:
            # Try toggling the DataParallel prefix to find a match.
            if k.startswith('module.'):
                k = k[len('module.'):]
            else:
                k = 'module.' + k
        if k in model_state:
            parsed_state_dict[k] = v
            pretrained_keys.append(k)
        else:
            non_match_keys.append(k)
            # raise ValueError('failed to match key of state dict smartly!')
    # Set lookup avoids the O(n^2) list-membership scan.
    pretrained_set = set(pretrained_keys)
    non_pretrain_keys = [k for k in model_state.keys() if k not in pretrained_set]
    print("[Partial Load] partial load state dict of keys: {}".format(parsed_state_dict.keys()))
    print("[Partial Load] non matched keys: {}".format(non_match_keys))
    print("[Partial Load] non pretrain keys: {}".format(non_pretrain_keys))
    # Start from the model's own weights and overwrite only the matched keys.
    new_state_dict = model_state
    new_state_dict.update(parsed_state_dict)
    model.load_state_dict(new_state_dict)
# Object-feature concatenation: prepend a mean-pooled global feature
# Assumes `item`, `num_boxes`, `image_w`, `image_h` are provided by the
# surrounding loader code, and `np`, `base64`, `copy` are imported -- TODO confirm.
# E.g. features is [30, 2048]: 30 detected boxes, one 2048-dim feature each.
features = np.frombuffer(base64.b64decode(item["features"]), dtype=np.float32).reshape(num_boxes, 2048)
boxes = np.frombuffer(base64.b64decode(item['boxes']), dtype=np.float32).reshape(num_boxes, 4)
# Global feature: mean of the per-box features.
g_feat = np.sum(features, axis=0) / num_boxes
# The global feature counts as one extra "box" from here on.
num_boxes = num_boxes + 1
# e.g. 30 boxes become 31 after prepending the global feature row.
features = np.concatenate([np.expand_dims(g_feat, axis=0), features], axis=0)
# 5-dim spatial feature per box: the 4 box coords plus an area term.
# Shape [num_boxes - 1, 5]: built from the original boxes, before the global row.
image_location = np.zeros((boxes.shape[0], 5), dtype=np.float32)
image_location[:,:4] = boxes
# Area normalized by the full image area. NOTE(review): this assumes the box
# layout is (x1, y1, x2, y2) -- confirm against the feature extractor's output.
image_location[:,4] = (image_location[:,3] - image_location[:,1]) * (image_location[:,2] - image_location[:,0]) / (float(image_w) * float(image_h))
# Keep two versions: `image_location` (coords scaled to [0,1]) and
# `image_location_ori` (absolute pixel coords).
# NOTE(review): the deepcopy happens AFTER the area column was already divided
# by image_w*image_h, so column 4 of `image_location_ori` holds the RELATIVE
# area while its global row below uses the absolute area image_w*image_h --
# verify this mix is intended by the downstream model.
image_location_ori = copy.deepcopy(image_location)
image_location[:,0] = image_location[:,0] / float(image_w)
image_location[:,1] = image_location[:,1] / float(image_h)
image_location[:,2] = image_location[:,2] / float(image_w)
image_location[:,3] = image_location[:,3] / float(image_h)
# Global row in normalized coords: the whole image, area 1.
g_location = np.array([0,0,1,1,1])
image_location = np.concatenate([np.expand_dims(g_location, axis=0), image_location], axis=0)
# Global row in absolute coords: the whole image, area image_w*image_h.
g_location_ori = np.array([0,0,image_w,image_h,image_w*image_h])
image_location_ori = np.concatenate([np.expand_dims(g_location_ori, axis=0), image_location_ori], axis=0)