1、想法
我的想法就是有没有什么方法能够给嘴唇自动上妆,人工智能应该可以实现,在网上看了一下,感觉得静下心来,拿出时间好好研究,好久没有看神经网络了,就想着有没有什么简单的方法,然后突然我有一个大胆想法,就是能不能把嘴唇分成很多层,然后每一层上不同的颜色就可以让颜色看起来连贯起来,有点像微积分那味道了,不会突兀了,现在想想真后悔没有早早掐断这个鬼想法,我居然还想着试试看哈哈哈。
2、实现思路
2.1 嘴唇分层
首先把嘴唇分层,在每一层中填充上颜色,只要层足够多,看上去颜色就是渐变的,没有突变的感觉。
2.2 分配颜色
为每一层合理分配颜色,怎么分配呢,这是一个问题,我的想法比较简单,假如要涂上一个大红色的口红,嘴唇分了5层,那么我取第3层是口红的颜色,然后第一层和第五层的颜色选择是嘴唇的颜色,那好问题来了,嘴唇的颜色怎么选择呢,我的选择方法也简单粗暴,用的就是dlib人脸特征检测点中检测点颜色的平均值,有关dlib的内容大家可以参考树莓派上利用python+opencv+dlib实现嘴唇检测。
**存在问题:**在颜色分配中存在的主要问题是嘴唇的颜色选择平均值,对于嘴唇边缘和人脸相连的地方很违和,由于dlib的嘴唇特征点的数量是有限的,所以在分层的过程中我是依据特征点的分布来的,所以嘴唇上颜色填充的形状会在张开嘴的时候变得有点畸形。
3、代码
# -*- coding: utf-8 -*-
# from gpiozero import LED
from time import sleep
from subprocess import check_call
import cv2 as cv
import numpy as np
import dlib
import threading
import pygame
from pygame.locals import *
import sys
# 声明界面中按钮的界面
# 输入参数
# screen:主界面,按钮界面将绘制于主界面之上
# font:pygame字体全局变量
# color:字体颜色
# button_size:按钮形状,要是元组中包含三个参数则是圆形,四个是矩形
# text:按钮中的文字
# x:按钮界面位于主界面中x的相对位置
# y:按钮界面位于主界面中y的相对位置
class SurfaceButton:
    """A rectangular or circular button rendered on its own transparent surface.

    Parameters
    ----------
    screen : main pygame surface the button is blitted onto
    font : pygame font used to render the label
    color : fill color of the button shape
    button_size : 3-tuple (cx, cy, r) draws a circle; 4-tuple (x, y, w, h) a rect
    text : button label text
    x, y : position of the button surface within the main screen
    """

    def __init__(self, screen, font, color, button_size, text, x=0, y=0):
        self.screen = screen
        self.color = color
        self.button_size = button_size
        self.text = font.render(text, True, (158, 16, 16))
        self.surface = pygame.Surface(screen.get_size())
        # Make the new surface's default black background transparent
        # when it is blitted onto the main screen.
        self.surface.set_colorkey((0, 0, 0))
        self.surfaceX = x
        self.surfaceY = y
        # Kept for debugging the label text:
        # self.text_show = text

    def draw(self):
        """Draw the button shape plus its label and blit onto the main screen."""
        if len(self.button_size) == 4:
            # Rectangular button.
            pygame.draw.rect(self.surface, self.color, pygame.Rect(self.button_size), width=0)
        elif len(self.button_size) == 3:
            # Circular button.
            pygame.draw.circle(self.surface, self.color, self.button_size[:2], self.button_size[2], width=0)
        else:
            # Unsupported shape spec: draw nothing (matches original behavior).
            return
        # Label/blit tail shared by both shapes (was duplicated per branch).
        label_pos = (self.button_size[0] - self.text.get_width() // 2,
                     self.button_size[1] - self.text.get_height() // 2)
        self.surface.blit(self.text, label_pos)
        self.surface.convert_alpha()
        self.screen.blit(self.surface, (self.surfaceX, self.surfaceY))
# 多线程操作
class MyThread(threading.Thread):
    """Thread that captures its target's return value for later retrieval."""

    def __init__(self, func, args=()):
        super().__init__()
        self.func = func
        self.args = args

    def run(self):
        # Stash the return value so get_result() can hand it back after join().
        self.result = self.func(*self.args)

    def get_result(self):
        """Return the target's result, or the raised AttributeError if run() never executed."""
        try:
            return self.result
        except Exception as exc:
            return exc
# 利用dlib进行人脸和嘴唇检测,并采样嘴唇特征点处的颜色
# 输入参数
# img:用于嘴唇检测的图片,格式是opencv的图片格式(BGR)
def lip_detector(img):
    """Detect faces in a BGR image and sample the 20 lip landmarks (dlib points 48-67).

    Returns two parallel lists, one entry per detected face:
    the lip landmark (x, y) coordinates and the RGB color sampled at each landmark.
    """
    all_lip_points = []
    all_lip_colors = []
    rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    # Transpose (h, w, 3) -> (w, h, 3) so pixels can be indexed as rgb[x][y].
    rgb = np.array(rgb).transpose(1, 0, 2)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    faces = detector(gray, 1)
    print('检测到了 %d 个人脸' % len(faces))
    for face in faces:
        # Predict the 68 facial landmarks for this face.
        landmarks = predictor(gray, face)
        points = []
        colors = []
        for n in range(48, 68):
            x = landmarks.part(n).x
            y = landmarks.part(n).y
            points.append((x, y))
            colors.append(list(rgb[x][y]))
            # cv.circle(img=img, center=(x, y), radius=3, color=(0, 255, 0), thickness=-1)
        all_lip_points.append(points)
        all_lip_colors.append(colors)
    return all_lip_points, all_lip_colors
# 分割嘴唇区域
def split_area(lip_point, landmarks_lip, split_num):
    """Split one lip half into layered boundary lines for gradient filling.

    lip_point lists landmark indices along the lip outline: [0] and [6] are the
    left/right corners, [1:6] the upper edge, [7:12] the lower edge (reversed
    here so upper and lower points pair up column-wise). Returns split_num + 1
    boundary lines of 7 points each, interpolated from upper edge to lower edge.
    """
    upper = lip_point[1:6]
    lower = lip_point[7:12][::-1]
    # Per-column step vector from the upper edge point down to its lower pair.
    steps = np.array([
        np.array(landmarks_lip[lo]) - np.array(landmarks_lip[up])
        for up, lo in zip(upper, lower)
    ]) / split_num
    corner_left = np.array(landmarks_lip[lip_point[0]])
    corner_right = np.array(landmarks_lip[lip_point[6]])
    boundary_lines = []
    for layer in range(split_num + 1):
        line = [corner_left]
        line.extend(
            (landmarks_lip[up] + steps[k] * layer).astype(np.int16)
            for k, up in enumerate(upper)
        )
        line.append(corner_right)
        boundary_lines.append(line)
    return boundary_lines
# 为每一个分割的区域分配颜色
def split_color(lipstick_color, lip_point, landmarks_lip_rgb, split_num):
    """Assign an RGB color to each of the split_num lip layers.

    The middle layer(s) get the pure lipstick color; layers fade linearly out
    to the natural lip color, taken as the average of the sampled landmark
    colors along the upper edge (lip_point[1:6]) and lower edge (lip_point[7:12]).

    Parameters
    ----------
    lipstick_color : RGB 3-tuple of the lipstick
    lip_point : landmark index order for this lip half (13 entries)
    landmarks_lip_rgb : per-landmark RGB colors sampled from the face
    split_num : number of layers

    Returns an (split_num, 3) int16 array of layer colors.
    """
    up_idx = lip_point[1:6]
    down_idx = lip_point[7:12][::-1]
    # BUG FIX: the original averaged landmarks_lip_rgb[i] (wrong offset) for the
    # upper edge, and averaged the landmark *indices* themselves for the lower
    # edge. Both edges must average the sampled colors at their boundary points.
    up_avg = np.mean([landmarks_lip_rgb[i] for i in up_idx], axis=0)
    down_avg = np.mean([landmarks_lip_rgb[i] for i in down_idx], axis=0)
    lipstick = np.array(lipstick_color)
    color = []
    mid = split_num // 2
    if split_num % 2 != 0:
        # Odd layer count: one middle layer, (split_num - 1) / 2 fades per side.
        half = (split_num - 1) / 2 or 1  # guard split_num == 1 (steps unused then)
        up_step = (lipstick - up_avg) / half
        down_step = (down_avg - lipstick) / half
        for j in range(split_num):
            if j < mid:
                color.append(up_avg + up_step * j)
            elif j == mid:
                color.append(lipstick)
            else:
                color.append(lipstick + down_step * (j - mid))
    else:
        # Even layer count: two middle layers get the lipstick color.
        half = (split_num - 2) / 2 or 1  # guard split_num == 2 (steps unused then)
        up_step = (lipstick - up_avg) / half
        down_step = (down_avg - lipstick) / half
        for j in range(split_num):
            if j < mid - 1:
                color.append(up_avg + up_step * j)
            elif j in (mid - 1, mid):
                color.append(lipstick)
            else:
                color.append(lipstick + down_step * (j - mid))
    return np.array(color).astype(np.int16)
# 绘制嘴唇线条或填充
def draw_lip(screen, img_makeup, area_line_up, color_up, area_line_down, color_down):
    """Fill each lip layer polygon of both lip halves into img_makeup (in place).

    Parameters
    ----------
    screen : pygame main surface (currently unused; blitting happens in the caller)
    img_makeup : BGR OpenCV image to draw into, modified in place
    area_line_up / area_line_down : boundary lines from split_area for each half
    color_up / color_down : per-layer RGB colors from split_color for each half
    """
    # Drawing the raw boundary lines with cv.line was used for debugging but
    # dropped: cv.imshow cannot display the Chinese window title correctly.
    for boundary, colors in ((area_line_up, color_up), (area_line_down, color_down)):
        layer_count = len(boundary) - 1  # hoisted: was np.array(...).shape[0] per loop
        for i in range(layer_count):
            # Layer polygon = i-th boundary line followed by the reversed (i+1)-th.
            polygon = np.vstack((np.array(boundary[i]), np.array(boundary[i + 1])[::-1]))
            # Color rows are RGB; OpenCV expects BGR, hence the [::-1].
            cv.fillPoly(img_makeup, [polygon], tuple(int(c) for c in colors[i][::-1]))
    # screen.blit(cvimage_to_pygame(cv.cvtColor(img_makeup, cv.COLOR_BGR2RGB)), (0, 0))
# 获取摄像头序号
def search_cap_num():
    """Probe camera indices 0..1999 and return the first that opens, else None.

    Each probe capture is released immediately (the original leaked every
    handle, and kept the winning one open while the caller reopened the same
    index, which can fail on some backends).
    """
    for i in range(2000):
        cap = cv.VideoCapture(i)
        opened = cap.isOpened()
        cap.release()
        if opened:
            return i
    return None
# 获取摄像头所能拍摄的像素
# 注意这个地方输出的shape是高度,宽度,深度,所以要转化一下
def set_screen_size():
    """Grab one frame from the global capture and return its (width, height).

    Raises RuntimeError if the camera fails to deliver a frame (the original
    ignored `ret` and crashed with an opaque AttributeError on `None.shape`).
    """
    ret, frame = cap.read()
    if not ret or frame is None:
        raise RuntimeError("failed to read a frame from the camera")
    # frame.shape is (height, width, depth); the reversed slice yields (width, height).
    return frame.shape[1::-1]
# 将opencv图片转化为pygame中图片
def cvimage_to_pygame(image):
    """Convert an RGB OpenCV/numpy image into a pygame image."""
    # image.shape is (height, width, depth); pygame wants (width, height).
    size = image.shape[1::-1]
    return pygame.image.frombuffer(image.tobytes(), size, "RGB")
# 处理pygame按钮界面的点击事件
def _circle_button_hit(button, mouse_x, mouse_y):
    # Bounding-box hit test against a circular button's (cx, cy, r) spec —
    # same square approximation the original used inline for both buttons.
    cx, cy, r = button.button_size
    return cx - r <= mouse_x <= cx + r and cy - r <= mouse_y <= cy + r

def deal_with_event():
    """Handle pygame events: quit on window close, toggle the makeup/quit
    button states on a left click inside the corresponding circle button.
    """
    global button_makeup_clicked, button_quit_clicked, \
        button_circle_quit, button_circle_makeup
    for event in pygame.event.get():
        if event.type == QUIT:
            # Window close: terminate the program.
            sys.exit()
        if event.type == MOUSEBUTTONDOWN:
            pressed_array = pygame.mouse.get_pressed()
            if pressed_array[0]:  # left button only
                mouse_x, mouse_y = pygame.mouse.get_pos()
                # The two buttons share one position; only the inactive one is clickable.
                if (not button_makeup_clicked) and \
                        _circle_button_hit(button_circle_makeup, mouse_x, mouse_y):
                    button_makeup_clicked = True
                    button_quit_clicked = False
                elif (not button_quit_clicked) and \
                        _circle_button_hit(button_circle_quit, mouse_x, mouse_y):
                    button_quit_clicked = True
                    button_makeup_clicked = False
# --- Top-level setup: camera, dlib models, pygame window and buttons ---
cap_num = search_cap_num()
cap = cv.VideoCapture(cap_num)
detector = dlib.get_frontal_face_detector()
# Requires the dlib 68-landmark model file next to the script.
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# Landmark orders tracing the upper (row 0) and lower (row 1) lip outlines,
# shifted by -48 so they index into the 20-entry per-face lip lists.
lip_order_dlib = np.array([[48, 49, 50, 51, 52, 53, 54, 64, 63, 62, 61, 60, 48],
                           [48, 59, 58, 57, 56, 55, 54, 64, 65, 66, 67, 60, 48]]) - 48
lip_order_num = lip_order_dlib.shape[1]
# Initialize pygame and its hardware subsystems.
pygame.init()
# Create the window, sized to the camera frame resolution.
screen_video = pygame.display.set_mode(set_screen_size(), 0, 32)
screen_width = screen_video.get_size()[0]
screen_height = screen_video.get_size()[1]
white = (255, 255, 255)
# Both buttons share the same circle in the bottom-right corner.
button_circle_pos = [(screen_width - screen_width // 6), screen_height - screen_height // 7]
button_circle_raduis = [screen_height // 10]  # NOTE(review): "raduis" is a typo for "radius"
button_circle_size = tuple(button_circle_pos + button_circle_raduis)
button_makeup_clicked = False
button_quit_clicked = True
# Label font — needs font.ttf beside the script (presumably for CJK glyphs).
font = pygame.font.Font("font.ttf", 24)
# Fallback system font (no CJK support):
# font = pygame.font.SysFont(None, 24)
button_circle_makeup = SurfaceButton(screen_video, font, white, button_circle_size, "化妆")
button_circle_quit = SurfaceButton(screen_video, font, white, button_circle_size, "退出")
# Set the window title.
pygame.display.set_caption("Hello, World!")
img = {}
split_num = 20
# Main loop: live preview until "makeup" is clicked, then freeze the current
# frame, paint the gradient lipstick on it, and show it until "quit" is clicked.
while cap.isOpened():
    if button_makeup_clicked:
        # if True:
        area_line_up = []
        area_line_down = []
        color_up = []
        color_down = []
        # img = cv.imread('1.jpg')
        # img still holds the last frame read in the preview branch below.
        img_makeup = img.copy()
        landmarks_lip, landmarks_lip_rgb = lip_detector(img)
        # cv.imwrite('3.jpg', img)
        # Compute layer boundaries and layer colors for each detected face.
        for i in range(np.array(landmarks_lip).shape[0]):
            print(i)
            area_line_up_tem = split_area(lip_order_dlib[0], landmarks_lip[i], split_num)
            area_line_down_tem = split_area(lip_order_dlib[1], landmarks_lip[i], split_num)
            color_up_tem = split_color((255, 0, 0), lip_order_dlib[0], landmarks_lip_rgb[i], split_num)
            color_down_tem = split_color((255, 0, 0), lip_order_dlib[1], landmarks_lip_rgb[i], split_num)
            area_line_up.append(area_line_up_tem)
            area_line_down.append(area_line_down_tem)
            color_up.append(color_up_tem)
            color_down.append(color_down_tem)
        # Paint every face's lips into the frozen frame.
        for i in range(np.array(area_line_up).shape[0]):
            draw_lip(screen_video, img_makeup, area_line_up[i], color_up[i], area_line_down[i], color_down[i])
        screen_video.blit(cvimage_to_pygame(cv.cvtColor(img_makeup, cv.COLOR_BGR2RGB)), (0, 0))
        button_circle_quit.draw()
        pygame.display.flip()
        # Hold the painted still frame until the quit button is clicked.
        while True:
            deal_with_event()
            if button_quit_clicked:
                break
    elif button_quit_clicked:
        # Live preview: show camera frames with the makeup button overlaid.
        ret, img = cap.read()
        img_rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB)
        screen_video.blit(cvimage_to_pygame(img_rgb), (0, 0))
        button_circle_makeup.draw()
        deal_with_event()
        pygame.display.flip()
4、实现的效果
感觉不能说很违和吧,只能说非常违和哈哈哈
5、改变方向
我觉得这种就应该是用神经网络来实现,这种方法永远做不到和人脸直接贴合的感觉。