Why use one-hot encoding?

One-hot encoding a discrete (categorical) feature makes distance computations between its values more sensible. Take a categorical feature for job type with three possible values. Without one-hot encoding the values would be coded as x_1 = (1), x_2 = (2), x_3 = (3), giving pairwise distances d(x_1, x_2) = 1, d(x_2, x_3) = 1, d(x_1, x_3) = 2. Does that mean job x_1 is less similar to x_3 than to x_2? For unordered categories that makes no sense, so this representation produces unreasonable distances. With one-hot encoding we instead get x_1 = (1, 0, 0), x_2 = (0, 1, 0), x_3 = (0, 0, 1), and the distance between any two jobs is sqrt(2). Every pair of job types is equally far apart, which is the more reasonable behaviour for a categorical feature.
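As a quick check, here is a minimal sketch (plain numpy; the arrays and the pairwise_dist helper are made up for this illustration and are not part of the original post) comparing the pairwise Euclidean distances of the integer coding with those of the one-hot coding:

import numpy as np

# the three job types, first as plain integer codes, then one-hot encoded
x_int = np.array([[1.0], [2.0], [3.0]])
x_onehot = np.eye(3)

def pairwise_dist(x):
    # Euclidean distance between every pair of rows
    diff = x[:, None, :] - x[None, :, :]
    return np.sqrt((diff ** 2).sum(axis=-1))

print(pairwise_dist(x_int))     # off-diagonal distances 1, 1, 2: x_1 looks 'farther' from x_3
print(pairwise_dist(x_onehot))  # every off-diagonal distance is sqrt(2): all pairs equally far apart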

# coding: utf-8

import sys
import cv2
import math
import numpy as np
import multiprocessing as mp
import feature
import utils
import stitch
import constant as const
import testw
import stretch

if __name__ == '__main__':
    if len(sys.argv) != 2:
        print('[Usage] python script <input_dirname>')
        print('[Example] python script ../input_image/parrington')
        sys.exit(0)
    # import pdb; pdb.set_trace()
    input_dirname = sys.argv[1]

    pool = mp.Pool(mp.cpu_count())

    img_list, focal_length = utils.parse(input_dirname)   # load the input images and their focal lengths

    # img_list = img_list[2:4]

    print('Warp images to cylinder')   # the focal length is needed for the cylindrical projection
    cylinder_img_list = pool.starmap(utils.cylindrical_projection, [(img_list[i], focal_length[i]) for i in range(0, len(img_list))])
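    # Cylindrical warping, in its usual form: with the origin at the image centre and focal length f,
    # a source pixel (x, y) ends up at x' = f * atan(x / f), y' = f * y / sqrt(x**2 + f**2).
    # The mapping actually applied here is whatever utils.cylindrical_projection implements;
    # the formula above is the textbook version, stated for orientation only.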

    _, img_width, _ = img_list[0].shape

    stitched_image = cylinder_img_list[0].copy()       # start from the first cylindrically projected image
    height_0, width_0, _ = stitched_image.shape        # height and width of the first projected image, e.g. 1080 x 1834
    shifts = [[0, 0]]                                   # per-image [dy, dx] shift relative to the previous image; the first image gets [0, 0]
    cache_feature = [[], []]                            # [descriptors, positions] of the previous image, reused in the next iteration
    # add first img for end to end align
    # cylinder_img_list += [stitched_image]
    flag = 0

    for i in range(1, len(cylinder_img_list)):
        print('Computing .... '+str(i+1)+'/'+str(len(cylinder_img_list)))
        img1 = cylinder_img_list[i-1]
        img2 = cylinder_img_list[i]
        print(' - Find features in previous img .... ', end='', flush=True)
        descriptors1, position1 = cache_feature
        if len(descriptors1) == 0:
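            # Features for the previous image are only computed when the cache is empty, i.e. on the
            # first iteration; afterwards the features extracted for img2 further down are carried
            # over via cache_feature and reused here as img1's features.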
        
            # sift = cv2.xfeatures2d.SIFT_create()
            # kp_, des = sift.detectAndCompute(img1, None)
            # descriptors1 = np.sqrt(des/(des.sum(axis=1, keepdims=True) + 1e-7))
            # position1 = [[int(kp_i.pt[0]), int(kp_i.pt[1])] for kp_i in kp_]

            # corner_response1 = feature.harris_corner(img1, pool)
            # descriptors1, position1 = feature.extract_description(img1, corner_response1, kernel=const.DESCRIPTOR_SIZE, threshold=const.FEATURE_THRESHOLD)

            position1, descriptors1 = testw.extract_description(img1)

        print(str(len(descriptors1))+' features extracted.')

        print(' - Find features in img_'+str(i+1)+' .... ', end='', flush=True)
        # corner_response2 = feature.harris_corner(img2, pool)
        # descriptors2, position2 = feature.extract_description(img2, corner_response2, kernel=const.DESCRIPTOR_SIZE, threshold=const.FEATURE_THRESHOLD)

        position2, descriptors2 = testw.extract_description(img2)

        # sift = cv2.xfeatures2d.SIFT_create()
        # kp_, des = sift.detectAndCompute(img2, None)
        # descriptors2 = np.sqrt(des/(des.sum(axis=1, keepdims=True) + 1e-7))
        # position2 = [[int(kp_i.pt[0]), int(kp_i.pt[1])] for kp_i in kp_]

        print(str(len(descriptors2))+' features extracted.')

        cache_feature = [descriptors2, position2]    # cache img2's features for the next iteration; each descriptor has 25 elements, e.g. shape [207, 25]

        if const.DEBUG and False:   # debug-only; corner_response1/2 exist only when the Harris-corner path above is enabled
            cv2.imshow('cr1', corner_response1)
            cv2.imshow('cr2', corner_response2)
            cv2.waitKey(0)

        print(' - Feature matching .... ', end='', flush=True)  # match the feature descriptors of the two images

        # matched_pairs = feature.matching(descriptors1, descriptors2, position1, position2, pool, y_range=const.MATCHING_Y_RANGE)
        matched_pairs = testw.matching(position1, descriptors1, position2, descriptors2)
        print(str(len(matched_pairs))+' features matched.')
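        # matched_pairs is assumed to hold one entry per correspondence, pairing a position in img1
        # with its matching position in img2; only those paired coordinates are needed by
        # utils.matched_pairs_plot and stitch.RANSAC below (the exact layout is whatever
        # testw.matching returns).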

        if const.DEBUG:
            utils.matched_pairs_plot(img1, img2, matched_pairs)
        print(' - Find best shift using RANSAC .... ', end='', flush=True)
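        # stitch.RANSAC is assumed to fit a translation-only model: pick a matched pair, take the
        # offset between its two positions as a candidate [dy, dx], count how many other pairs agree
        # within some tolerance, and keep the candidate with the most inliers (the previous shift,
        # shifts[-1], is passed in as a reference). This is inferred from how the result is used
        # below, not read out of the stitch module itself.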
        try:
            shift = stitch.RANSAC(matched_pairs, shifts[-1])
        except:
            # import pdb; pdb.set_trace()
            shift = np.array([9999, 9999])
            continue
            # raise
        if shift[0] < 0:
            shift[0] = 0
        print('best shift ', shift)
    """
    shift = np.array([5, -100])   #第一个参数是上下  第二个是左右移动的位置差
    # #对当前生成的shift参数进行矫正 
    if i ==3:
        shift = np.array([0, -100])
    if i ==4:
        shift = np.array([4, -100])
    if i==6:
        shift = np.array([0, -126])
    if i==8:
        shift = np.array([8, -126])
    if i==9:
        shift = np.array([0, -100])
    if i==12:
        shift = np.array([10, -470])
    if i==13:
        shift = np.array([5, -120])
    if i==17:
        shift = np.array([0, -250]) 
    if i==18:
        shift = np.array([0, -100]) 
    if i==19:
        shift = np.array([0, -105]) 
    if i >20:
        shift = np.array([0, -100]) 
    if i >23:
        shift = np.array([4, -100]) 
    if i >=28:
        shift = np.array([4, -75])
    if i==34:
        shift = np.array([0, -70]) 
    if i==35:
        shift = np.array([0, -50]) 
    if i==36:
        shift = np.array([0, -60]) 
    if i>=37:
        shift = np.array([0, -58]) 
    if i>=41:
        shift = np.array([0, -83]) 
    if i>=43:
        shift = np.array([0, -33]) 
    if i>=44:
        shift = np.array([0, -4]) 
    if i>=46:
        shift = np.array([5, -320])
    """
        shifts += [shift]

        print(' - Stitching image .... ', end='', flush=True)
        stitched_image = stitch.stitching(stitched_image, img2, shift, pool, blending=True)
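        # stitch.stitching pastes img2 into the running panorama at the estimated [dy, dx] offset;
        # with blending=True the overlapping strip is presumably combined with a gradual blend
        # rather than hard-pasted; the exact scheme lives inside the stitch module.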
        # if i == 46:
        #     import ipdb; ipdb.set_trace()
        cv2.imwrite(str(i) + '.jpg', stitched_image)
        print('Saved.')
        flag += 1
        if len(shifts) < flag:
            break

    # shifts00 = np.array([[0, 0], [5, -100], [5, -100], [0, -100], [4, -100], [5, -100], [0, -126], [5, -100], [8, -126], [0, -100], [5, -100], [5, -100]])
    # the loop leaves one long panorama; now trim it and straighten it
    rows, cols = stitched_image.shape[:2]    # height and width of the full panorama, e.g. 1202 x 4686
    width_start = int(width_0/2)             # e.g. 917
    width_end = cols - (width_0 - width_start)
    stitched_image = stitched_image[:, width_start:width_end]  # keep only the central strip, dropping half an image width at each end
    # next, estimate the vertical offset by which the image has to be stretched back
    shift_y_start = 0
    for i in stitched_image[:, 0][:, 0][::-1]:
        if i == 0:
            shift_y_start += 1
        else:
            break
    shift_y_end = 0
    for i in stitched_image[:, -1][:, 0][::-1]:
        if i == 0:
            shift_y_end += 1
        else:
            break
    shift_y = shift_y_end - shift_y_start
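    # The two loops above count, from the bottom upwards, how many black (zero) pixels sit at the
    # left and right edges of the cropped panorama; their difference estimates the vertical drift
    # accumulated across the whole sequence. stretch.align presumably shears/stretches the image
    # by a fraction of shift_y per column so that the two ends line up before cropping.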
    aligned = stretch.align(stitched_image, shift_y)
    print('Cropping image')
    cropped = stitch.crop(aligned)
    cv2.imwrite('cropped.jpg', cropped)

""" 
print('Perform end to end alignment')
aligned = stitch.end2end_align(stitched_image, shifts)
cv2.imwrite('aligned.jpg', aligned)
print('Cropping image')
cropped = stitch.crop(aligned)
cv2.imwrite('cropped.jpg', cropped)
"""
