# Import the required libraries
import numpy as np
import dlib
import cv2
# Index groups of the 68 dlib facial landmarks
JAW_POINTS = list(range(0, 17))
RIGHT_BROW_POINTS = list(range(17, 22))
LEFT_BROW_POINTS = list(range(22, 27))
NOSE_POINTS = list(range(27, 35))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
MOUTH_POINTS = list(range(48, 61))
FACE_POINTS = list(range(17, 68))
# Gather the feature groups used for masking/alignment into POINTS
# (a list holding one combined index group) and freeze a tuple copy
# suitable for indexing the landmark matrix.
POINTS = [LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS +
          RIGHT_BROW_POINTS + NOSE_POINTS + MOUTH_POINTS]
# BUG FIX: the original defined `POINTStupLe` (capital L) while the script
# below indexes with `POINTStuple`, raising NameError at runtime. Define the
# correctly spelled name and keep the misspelling as a compatibility alias.
POINTStuple = tuple(POINTS)
POINTStupLe = POINTStuple
# Helper functions used by the face-swap script
# Build a soft (blurred) mask covering the facial features of `im`.
def getFaceMask(im, keyPoints, groups=None):
    """Return a float64 face mask of shape (rows, cols, 3) for image `im`.

    Parameters
    ----------
    im : image whose height/width define the mask size (pixel values unused).
    keyPoints : matrix of 68 (x, y) landmark coordinates, one per row.
    groups : optional list of landmark-index groups to fill; defaults to the
        module-level POINTS (parameterized so the function no longer depends
        only on the global — backward compatible).

    For each group, the convex hull of its landmarks is filled with 1.0 in a
    single-channel mask, the mask is replicated to 3 channels laid out as
    (rows, cols, 3) for OpenCV, and finally Gaussian-blurred so the mask
    edges blend smoothly.
    """
    if groups is None:
        groups = POINTS
    # Fresh mask buffer; the original reassigned `im`, shadowing the parameter.
    mask = np.zeros(im.shape[:2], dtype=np.float64)
    for group in groups:
        hull = cv2.convexHull(keyPoints[group])  # convex hull of the group's points
        cv2.fillConvexPoly(mask, hull, color=1)  # fill hull region with 1.0
    # Replicate the single channel into shape (rows, cols, 3).
    mask = np.array([mask, mask, mask]).transpose((1, 2, 0))
    # Gaussian blur for soft edges (this is a plain blur, not a GMM as the
    # original comment claimed); the kernel size is a tunable blend width.
    mask = cv2.GaussianBlur(mask, (25, 25), 0)
    return mask
# Compute the affine matrix M carrying face-1 landmark coordinates onto
# face-2 landmark coordinates (ordinary Procrustes alignment: translation +
# uniform scale + rotation, rotation solved via SVD).
def getM(points1, points2):
    """Return the 2x3 affine matrix mapping points1's frame to points2's."""
    p1 = points1.astype(np.float64)
    p2 = points2.astype(np.float64)
    # Centre both point sets on their centroids.
    c1 = np.mean(p1, axis=0)
    c2 = np.mean(p2, axis=0)
    p1 = p1 - c1
    p2 = p2 - c2
    # Normalise overall scale.
    s1 = np.std(p1)
    s2 = np.std(p2)
    p1 = p1 / s1
    p2 = p2 / s2
    # Optimal rotation from the SVD of the correlation matrix
    # (orthogonal Procrustes problem).
    U, _, Vt = np.linalg.svd(p1.T @ p2)
    R = (U @ Vt).T
    scale = s2 / s1
    # Assemble [scale*R | translation] as the 2x3 matrix cv2.warpAffine expects.
    return np.hstack((scale * R, c2.T - (scale * R) @ c1.T))
# Detect the face in `im` and return its 68 landmark coordinates.
def getKeyPoints(im):
    """Return a (68, 2) np.matrix of (x, y) landmarks for the first face.

    Relies on the module-level `detector` and `predictor` (dlib models);
    assumes at least one face is detected — IndexError otherwise.
    """
    faces = detector(im, 1)              # dlib face rectangles, upsample once
    landmarks = predictor(im, faces[0])  # 68-point shape for the first face
    return np.matrix([[pt.x, pt.y] for pt in landmarks.parts()])
# Colour-correct image b so its local colour balance matches image a.
def normalColor(a, b):
    """Return b rescaled per-pixel so its low-frequency colour matches a's.

    BUG FIX: the original called non-existent ndarray methods
    (`a.getColor()` / `b.setColor()`) and returned None, which crashed the
    later blend `bcolor * mask`. Instead, divide heavy Gaussian blurs of the
    two images to obtain a smooth per-pixel colour-gain map and apply it to
    b, returning a float64 image.
    """
    kernel = (111, 111)  # very large kernel: keep only low-frequency colour
    aBlur = cv2.GaussianBlur(a, kernel, 0).astype(np.float64)
    bBlur = cv2.GaussianBlur(b, kernel, 0).astype(np.float64)
    bBlur = np.maximum(bBlur, 1.0)  # guard against division by zero
    return b * (aBlur / bBlur)
# --- Image loading -----------------------------------------------------
# Read the two face images: `a` receives the face, `b` supplies it.
a = cv2.imread("a.png")
b = cv2.imread("b.png")
detector = dlib.get_frontal_face_detector()  # frontal-face location detector
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")  # 68-landmark model
# Locate the 68 facial landmarks in each image.
aKeyPoints = getKeyPoints(a)
bKeyPoints = getKeyPoints(b)
# Keep an untouched copy of b for the final side-by-side display.
bOriginal = b.copy()
# Soft masks over each face's feature region.
aMask = getFaceMask(a, aKeyPoints)
bMask = getFaceMask(b, bKeyPoints)

# --- Affine alignment --------------------------------------------------
# Transform matrix carrying b's face onto a's face.
# BUG FIX: the original indexed with the undefined name `POINTStuple`
# (NameError); `POINTStupLe` is the name actually defined above.
M = getM(aKeyPoints[POINTStupLe], bKeyPoints[POINTStupLe])
# Warp b's mask into a's frame; dsize is (width, height), hence the reversal.
dsize = a.shape[:2][::-1]
bMaskWarp = cv2.warpAffine(bMask, M, dsize,
                           borderMode=cv2.BORDER_TRANSPARENT,
                           flags=cv2.WARP_INVERSE_MAP)
cv2.imshow("bMaskWarp", bMaskWarp)
cv2.waitKey()
# Combined mask: per-pixel maximum of the two face masks.
# BUG FIX: the original fused this assignment and cv2.waitKey() onto one
# line, a syntax error; they are now separate statements.
mask = np.max([aMask, bMaskWarp], axis=0)
# Warp b itself into a's frame with the same matrix.
bWrap = cv2.warpAffine(b, M, dsize,
                       borderMode=cv2.BORDER_TRANSPARENT,
                       flags=cv2.WARP_INVERSE_MAP)
cv2.imshow("bWrap", bWrap)
cv2.waitKey()

# --- Blending and display ----------------------------------------------
bcolor = normalColor(a, bWrap)  # recolour the warped face to match a
out = a * (1.0 - mask) + bcolor * mask  # composite: a outside mask, b's face inside
# Show the originals next to the swapped result (out is float in 0..255,
# so divide by 255 for imshow's float-image convention).
cv2.imshow("a", a)
cv2.imshow("b", bOriginal)
cv2.imshow("out", out / 255)
cv2.waitKey()
cv2.destroyAllWindows()