How to Make a Face Beautiful (7)


Processing with mediapipe
Plan
1. Detect the face and take out its bounding box (a small sketch follows this list)
2. Beautify the face inside the box
   - High-pass skin smoothing (a simplified sketch also follows this list)
     (1) Exposure adjustment
     (2) Blend of the green and blue channels
     (3) YUCIHighPassSkinSmoothingMaskBoost
   - Adjust the image brightness
   - Blend with the original
3. Paint the lips red (they look too ugly otherwise)
4. Darken the eyes
5. Remove the background, replace it with a green screen, and feed the result into OBS for live streaming (see the last sketch after this list)
6. Rewrite the features as a PyQt5 program for later use
   (1) Draw a main window in Qt Designer and convert it to mainwindow.py with pyuic5
   (2) The main program main.py loads mainwindow.py
   (3) A thread pulls frames from the camera (init, run, stop)
   (4) A frame receives the image-processing results
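
For step 1, MediaPipe's FaceDetection solution already returns a relative bounding box for each face. The sketch below is my own minimal example, not part of the original code: the file name face_box.py, the helper get_face_boxes and the test image face.jpg are assumptions. It converts the relative box into pixel coordinates so the face region can be cropped and handed to the beautifying steps.

# face_box.py (sketch, not one of the original files)
import cv2
import mediapipe as mp

mp_face_detection = mp.solutions.face_detection


def get_face_boxes(frame_bgr, min_confidence=0.5):
    """Return (x, y, w, h) pixel boxes for every face found in a BGR frame."""
    h, w = frame_bgr.shape[:2]
    boxes = []
    with mp_face_detection.FaceDetection(
            model_selection=0, min_detection_confidence=min_confidence) as detector:
        # MediaPipe expects RGB input.
        results = detector.process(cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB))
        if results.detections:
            for detection in results.detections:
                box = detection.location_data.relative_bounding_box
                x = max(int(box.xmin * w), 0)
                y = max(int(box.ymin * h), 0)
                boxes.append((x, y, int(box.width * w), int(box.height * h)))
    return boxes


if __name__ == "__main__":
    img = cv2.imread("face.jpg")            # assumed test image
    for x, y, bw, bh in get_face_boxes(img):
        face_roi = img[y:y + bh, x:x + bw]  # the region handed to step 2
        cv2.rectangle(img, (x, y), (x + bw, y + bh), (0, 255, 0), 2)
    cv2.imshow("face boxes", img)
    cv2.waitKey(0)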
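
Step 2 refers to the YUCIHighPassSkinSmoothing pipeline (exposure adjustment, a green/blue channel blend that builds the skin mask, the mask boost, then blending). I have not ported that filter here; the sketch below is only a simplified frequency-separation approximation in OpenCV that follows the same idea: split the face crop into a blurred base and a high-pass detail layer, keep just part of the skin texture, adjust the brightness, and blend the result with the original. The function name smooth_skin and all parameter values are my own assumptions.

# skin_smooth.py (simplified sketch, not the YUCI implementation)
import cv2
import numpy as np


def smooth_skin(face_bgr, sigma=8, detail_keep=0.3, exposure=1.05,
                brightness=5, strength=0.7):
    """Soften skin texture in a face crop with a simple high-pass split."""
    img = face_bgr.astype(np.float32)
    # Low-frequency base: the blurred face without fine skin texture.
    low = cv2.GaussianBlur(img, (0, 0), sigmaX=sigma)
    # High-frequency detail: pores, small blemishes, fine edges.
    high = img - low
    # Keep only a fraction of the detail, then a mild exposure/brightness tweak.
    out = (low + detail_keep * high) * exposure + brightness
    out = np.clip(out, 0, 255).astype(np.uint8)
    # Blend with the original so the effect stays subtle.
    return cv2.addWeighted(face_bgr, 1.0 - strength, out, strength, 0)

Calling smooth_skin(face_roi) on the crop from the previous sketch and writing it back into the frame gives a rough stand-in for steps 2-(1) through the final blend.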
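
For step 5, instead of keying out a physical green screen, MediaPipe's SelfieSegmentation can paint the background solid green directly, and OBS can then chroma-key the window. The sketch below is again my own assumption (the file name green_screen.py, the run_green_screen helper and its defaults are made up); wiring this into the PyQt5 app comes later.

# green_screen.py (sketch, not one of the original files)
import cv2
import numpy as np
import mediapipe as mp

mp_selfie = mp.solutions.selfie_segmentation

GREEN_BGR = (0, 255, 0)   # solid green background for the OBS chroma key


def run_green_screen(camera_id=0, threshold=0.5):
    cap = cv2.VideoCapture(camera_id)
    with mp_selfie.SelfieSegmentation(model_selection=1) as segmenter:
        while True:
            ok, frame = cap.read()
            if not ok:
                break
            # segmentation_mask is a float map, high where a person is detected.
            results = segmenter.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            person = results.segmentation_mask > threshold
            background = np.full(frame.shape, GREEN_BGR, dtype=np.uint8)
            output = np.where(person[..., None], frame, background)
            cv2.imshow("green screen", output)
            if cv2.waitKey(1) & 0xFF == 27:   # Esc to quit
                break
    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    run_green_screen()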

Source: see https://www.samproell.io/posts/yarppg/yarppg-face-detection-with-mediapipe/

The earlier content will be revised in the next post.

# main.py
from PyQt5.QtWidgets import QApplication

from mainwindow import MainWindow
from rppg import RPPG

if __name__ == "__main__":
    app = QApplication([])
    rppg = RPPG(video=0, parent=app)
    win = MainWindow(rppg=rppg)
    win.show()

    rppg.start()
    app.exec_()
    rppg.stop()
# mainwindow.py

from PyQt5.QtWidgets import QMainWindow
import pyqtgraph as pg
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_face_mesh = mp.solutions.face_mesh


class MainWindow(QMainWindow):
    def __init__(self, rppg):
        """MainWindow visualizing the output of the RPPG model.
        """
        super().__init__()

        rppg.rppg_updated.connect(self.on_rppg_updated)
        self.init_ui()

    def on_rppg_updated(self, output):
        """Update UI based on RppgResults.
        """
        img = output.rawimg.copy()
        draw_facemesh(img, output.landmarks, tesselate=True, contour=True)
        self.img.setImage(img)


    def init_ui(self):
        """Initialize window with pyqtgraph image view box in the center.
        """
        self.setWindowTitle("FaceMesh detection in PyQt")

        layout = pg.GraphicsLayoutWidget()
        self.img = pg.ImageItem(axisOrder="row-major")
        vb = layout.addViewBox(invertX=True, invertY=True, lockAspect=True)
        vb.addItem(self.img)

        self.setCentralWidget(layout)


def draw_facemesh(img, results, tesselate=False,
                  contour=False, irises=False):
    """Draw all facemesh landmarks found in an image.

    Irises are only drawn if the corresponding landmarks are present,
    which requires FaceMesh to be initialized with refine_landmarks=True.
    """
    if results is None or results.multi_face_landmarks is None:
        return

    for face_landmarks in results.multi_face_landmarks:
        if tesselate:
            mp_drawing.draw_landmarks(
                image=img,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_TESSELATION,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_tesselation_style())
        if contour:
            mp_drawing.draw_landmarks(
                image=img,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_CONTOURS,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_contours_style())
        # Iris landmarks (indices 468-477) only exist with refine_landmarks=True.
        if irises and len(face_landmarks.landmark) > 468:
            mp_drawing.draw_landmarks(
                image=img,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_IRISES,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_iris_connections_style())

# rppg.py

from collections import namedtuple
import numpy as np
from PyQt5.QtCore import pyqtSignal, QObject
import mediapipe as mp

from camera import Camera

RppgResults = namedtuple("RppgResults", ["rawimg", "landmarks"])

class RPPG(QObject):

    rppg_updated = pyqtSignal(RppgResults)

    def __init__(self, parent=None, video=0):
        """rPPG model processing incoming frames and emitting calculation
        outputs.

        The signal RPPG.rppg_updated provides a named tuple RppgResults containing
          - rawimg: the raw frame from the camera
          - landmarks: the full results object returned by FaceMesh.process,
            whose multi_face_landmarks attribute holds the detected landmarks
        """
        super().__init__(parent=parent)

        self._cam = Camera(video=video, parent=parent)
        self._cam.frame_received.connect(self.on_frame_received)

        self.detector = mp.solutions.face_mesh.FaceMesh(
            max_num_faces=1,
            refine_landmarks=False,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5
        )

    def on_frame_received(self, frame):
        """Process new frame - find face mesh and emit outputs.
        """
        rawimg = frame.copy()
        results = self.detector.process(frame)

        self.rppg_updated.emit(RppgResults(rawimg, results))

    def start(self):
        """Launch the camera thread.
        """
        self._cam.start()

    def stop(self):
        """Stop the camera thread and clean up the detector.
        """
        self._cam.stop()
        self.detector.close()

# camera.py

import time

import cv2
import numpy as np
from PyQt5.QtCore import QThread, pyqtSignal


class Camera(QThread):
    """Wraps cv2.VideoCapture and emits Qt signal with frames in RGB format.

    The `run` function launches a loop that waits for new frames in the
    VideoCapture and emits them with a `frame_received` signal.  Calling
    `stop` stops the loop and releases the camera.
    """
    frame_received = pyqtSignal(np.ndarray)
    """PyQt Signal emitting new frames read from the camera.
    """

    def __init__(self, video=0, parent=None):
        """Initialize Camera instance.

        Args:
            video (int or string): ID of camera or video filename
            parent (QObject): parent object in Qt context
        """
        super().__init__(parent=parent)

        self._cap = cv2.VideoCapture(video)
        self._running = False

    def run(self):
        """Start loop in thread capturing incoming frames.
        """
        self._running = True
        while self._running:
            ret, frame = self._cap.read()

            if not ret:
                self._running = False
                raise RuntimeError("No frame received")

            self.frame_received.emit(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

    def stop(self):
        """Stop loop and release camera.
        """
        self._running = False
        time.sleep(0.1)
        self._cap.release()
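
To try the PyQt5 version, put main.py, mainwindow.py, rppg.py and camera.py in the same folder, install the packages the imports need (PyQt5, pyqtgraph, mediapipe, opencv-python, numpy), and run python main.py. The window should show the camera feed with the FaceMesh tesselation and contours drawn on top.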
