Processing with MediaPipe
Plan
1. First run face detection, then take out the bounding box of the face.
2. Beautify (smooth) the face inside that box:
   - high-pass retention (the "High Pass" skin-smoothing technique)
     (1) exposure adjustment
     (2) overlay blend of the green and blue channels
     (3) YUCIHighPassSkinSmoothingMaskBoost
   - adjust the image brightness
   - blend the results
3. Paint the mouth red (it looked terrible, so this step was removed).
4. Paint around the eyes in black (eyeliner).
5. Add background removal with a green screen, then feed the result into OBS so it can be live-streamed.
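The main loop below relies on imports, a webcam capture and a few helpers that were set up in the earlier posts of this series and are not repeated here. The sketch below is only my assumption of what that setup looks like: BG_COLOR, cap, frame_counter, np2pil and landmarksDetection are guesses at the missing pieces, while the utils module, FONTS and the LEFT_EYE_U / LEFT_EYE_L / RIGHT_EYE_U / RIGHT_EYE_L landmark-index lists come from the eye-tracking utilities of the previous posts and are not reproduced.

import time
import cv2
import numpy as np
import mediapipe as mp
from PIL import Image, ImageFilter
from scipy.interpolate import CubicSpline

mp_face_mesh = mp.solutions.face_mesh
mp_selfie_segmentation = mp.solutions.selfie_segmentation

BG_COLOR = (0, 255, 0)      # assumed: pure green, so OBS can chroma-key the background
frame_counter = 0
cap = cv2.VideoCapture(0)   # assumed: default webcam

def np2pil(arr):
    # assumed helper: float image in the 0-1 range -> 8-bit PIL image
    return Image.fromarray(np.uint8(np.clip(arr, 0, 1) * 255))

def landmarksDetection(img, results, draw=False):
    # assumed helper: convert the normalized FaceMesh landmarks to pixel coordinates
    img_h, img_w = img.shape[:2]
    coords = [(int(p.x * img_w), int(p.y * img_h))
              for p in results.multi_face_landmarks[0].landmark]
    if draw:
        for c in coords:
            cv2.circle(img, c, 2, (0, 255, 0), -1)
    return coords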
face_mesh = mp_face_mesh.FaceMesh(max_num_faces=1, min_detection_confidence=0.5, min_tracking_confidence=0.5)
selfie_segmentation = mp_selfie_segmentation.SelfieSegmentation(model_selection=0)
# face_detection = mp_face_detection.FaceDetection(model_selection=0, min_detection_confidence=0.5)
if not cap.isOpened():
    print("Cannot open camera")
    exit()
start_time = time.time()  # start time, used later for the FPS calculation
while True:
    frame_counter += 1  # frame counter
    ret, img = cap.read()
    if not ret:
        break  # stop if no frame was returned
    image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB input
    results_seg = selfie_segmentation.process(image)
    condition = np.stack((results_seg.segmentation_mask,) * 3, axis=-1) > 0.5
    fg_image = image
    bg_image = np.zeros(image.shape, dtype=np.uint8)
    bg_image[:] = BG_COLOR
    green_image = np.where(condition, fg_image, bg_image)  # green_image: the person is kept, the background is replaced with green
    green_image_OK = green_image[:, :, ::-1]  # back to BGR channel order for OpenCV
    # Take out the face region for processing; img_o is scaled to 0-1, results holds the FaceMesh output
    img_o = np.array(green_image_OK, np.float32) / 255.0  # the frame (already back in BGR) scaled to 0-1
    results = face_mesh.process(green_image)  # FaceMesh also expects the RGB frame
    if results.multi_face_landmarks:
        # for detection in results.detections:
        #     bounding_box = detection.location_data.relative_bounding_box
        for face_landmarks in results.multi_face_landmarks:
            mesh_coords = landmarksDetection(green_image_OK, results, False)
            y = mesh_coords[10][1]  # landmark 10 ≈ top of the forehead, 152 ≈ chin
            h = abs(mesh_coords[10][1] - mesh_coords[152][1])
            yy = np.maximum((y - int(h / 4)), 0)  # np.maximum guards against negative coordinates when the face moves past the frame edge
            hh = h + int(h / 4)
            x = mesh_coords[234][0]  # landmarks 234 / 454 ≈ the two side edges of the face oval
            w = abs(mesh_coords[454][0] - mesh_coords[234][0])
            xx = np.maximum((x - int(w / 6)), 0)
            ww = w + int(w / 3)
            # cv2.rectangle(img, (xx, yy), (xx + ww, yy + hh), (255, 255, 255), thickness=2)
            img_face = green_image_OK[yy:yy + hh, xx:xx + ww]
            # Take the face crop img_face and run the skin-smoothing (beautify) steps on it
            ########
            # input_img = np.array(img_face[..., ::-1] / 255.0, dtype=np.float32)  # BGR -> RGB, scaled to 0-1, shape e.g. (2320, 3088, 3)
            input_img = np.array(img_face / 255.0, dtype=np.float32)  # note: img_face keeps OpenCV's BGR channel order
            ea_img = input_img * pow(2, -1.0)  # exposure adjustment: darken the crop by one stop (halve it)
            # YUCIGreenBlueChannelOverlayBlend: overlay-blend the green and blue channels into ba_img
            base = ea_img[..., 1]
            overlay = ea_img[..., 2]
            ba = 2.0 * overlay * base  # overlay blend, lower-half formula: 2 * base * overlay
            ba_img = np.zeros((ba.shape[0], ba.shape[1], 3), dtype=np.float32)
            ba_img[..., 0] = ba
            ba_img[..., 1] = ba
            ba_img[..., 2] = ba
            # YUCIHighPass: the high-pass step is simple - Gaussian-blur the image first, then combine it with the original to get blur_img / hp_img
            # Gaussian blur first
            radius = int(np.ceil(7.0 * input_img.shape[0] / 750.0))
            pil_img = np2pil(ba_img)
            pil_blur = pil_img.filter(ImageFilter.GaussianBlur(radius))
            blur_img = np.asarray(pil_blur, np.float32) / 255.0
            # then the high pass itself: hp_img = ba_img - blur_img + 0.5
            hp_img = ba_img - blur_img + 0.5
            # YUCIHighPassSkinSmoothingMaskBoost
            hardLightColor = hp_img[..., 2]
            [x1, y1] = np.where(hardLightColor < 0.5)
            [x2, y2] = np.where(hardLightColor >= 0.5)
            for i in range(3):  # apply the hard-light blend three times to sharpen the mask
                hardLightColor[x1, y1] = hardLightColor[x1, y1] * hardLightColor[x1, y1] * 2.0
                hardLightColor[x2, y2] = 1.0 - (1.0 - hardLightColor[x2, y2]) * (1.0 - hardLightColor[x2, y2]) * 2.0
            k = 255.0 / (164.0 - 75.0)  # linearly remap [75, 164] (on the 0-255 scale) to [0, 1]
            hardLightColor = (hardLightColor - 75.0 / 255.0) * k
            hpss_img = np.zeros((hardLightColor.shape[0], hardLightColor.shape[1], 3))
            hpss_img[..., 0] = hardLightColor
            hpss_img[..., 1] = hardLightColor
            hpss_img[..., 2] = hardLightColor
            hpss_img = np.clip(hpss_img, 0, 1)  # clamp every value to the 0-1 range
            # Build a cubic-spline tone curve from control points
            # see https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.html#scipy.interpolate.CubicSpline
            xs = [0, 120.0 / 255.0, 1]
            ys = [0, 146.0 / 255.0, 1]  # 146
            cs = CubicSpline(xs, ys)
            tc_img = cs(input_img)  # brightened (tone-curved) version of the face crop
            #### the key step: where the mask is high keep the original detail, where it is low use the brightened image ####
            blend_img = input_img * hpss_img + tc_img * (1 - hpss_img)
            # sharpen
            from PIL import ImageEnhance
            enhancer = ImageEnhance.Sharpness(np2pil(blend_img))
            img_sharp = enhancer.enhance(3)
            result1 = np.array(img_sharp, np.float32) / 255.0
            # Write the smoothed face back into the original frame img_o
            img_o[yy:yy + hh, xx:xx + ww] = result1
            # Draw the eyeliner
            img_o = utils.fillPolyTrans(img_o, [mesh_coords[p] for p in LEFT_EYE_U], utils.BLACK, opacity=0.3)
            img_o = utils.fillPolyTrans(img_o, [mesh_coords[p] for p in RIGHT_EYE_U], utils.BLACK, opacity=0.3)
            img_o = utils.fill_lineTrans(img_o, [mesh_coords[p] for p in LEFT_EYE_L], utils.BLACK, opacity=0.2)
            img_o = utils.fill_lineTrans(img_o, [mesh_coords[p] for p in RIGHT_EYE_L], utils.BLACK, opacity=0.2)
    end_time = time.time() - start_time  # elapsed time since the loop started
    fps = frame_counter / end_time
    img_o = utils.textWithBackground(img_o, f'FPS: {round(fps, 1)}', FONTS, 1.0, (20, 50), bgOpacity=0.9, textThickness=2)
    img = utils.textWithBackground(img, f'FPS: {round(fps, 1)}', FONTS, 1.0, (20, 50), bgOpacity=0.9, textThickness=2)
    # cv2.imshow("face", img_face)
    cv2.imshow('face_green', img_o)
    cv2.imshow("face_", img)
    cv2.imshow("face_output", green_image_OK)
    if cv2.waitKey(5) == ord('q'):
        break  # press q to stop
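Once the loop exits, it is worth releasing the webcam and the MediaPipe solutions and closing the preview windows; a minimal cleanup sketch, not part of the original loop:

# release the webcam, close the MediaPipe solutions and the preview windows
cap.release()
face_mesh.close()
selfie_segmentation.close()
cv2.destroyAllWindows()

For step 5 of the plan, the green-screen output ('face_green') can then be captured in OBS and the green background removed with OBS's chroma key filter.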