DensePose Inference

Command to run inference:

python inference_dp_contour.py show configs/densepose_rcnn_R_50_FPN_WC1M_s1x.yaml ./weights/R_50_FPN_WC1M_s1x.pkl ./test_pics/gao1.jpg --output ./saved/saved_face2.png
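
Note: ShowAction appends a four-digit entry index to the output name (see _get_out_fname in the script below), so --output ./saved/saved_face2.png is actually written to ./saved/saved_face2.0001.png.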

The inference script (inference_dp_contour.py):

import argparse
import logging
import os
import sys
from typing import Any, ClassVar, Dict, List
import torch

from detectron2.config import CfgNode, get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.engine.defaults import DefaultPredictor
from detectron2.structures.instances import Instances
from detectron2.utils.logger import setup_logger

from densepose import add_densepose_config
from densepose_results_funcs import CompoundVisualizer
from densepose_results_funcs import DensePoseResultsMplContourVisualizer
from densepose.vis.extractor import (
    DensePoseResultExtractor,
)

DOC = """Apply Net - a tool to print / visualize DensePose results
"""

LOGGER_NAME = "apply_net"
logger = logging.getLogger(LOGGER_NAME)

_ACTION_REGISTRY: Dict[str, "Action"] = {}

class Action(object):
    @classmethod
    def add_arguments(cls: type, parser: argparse.ArgumentParser):
        parser.add_argument(
            "-v",
            "--verbosity",
            action="count",
            help="Verbose mode. Multiple -v options increase the verbosity.",
        )

def register_action(cls: type):
    """
    Decorator for action classes to automate action registration
    """
    global _ACTION_REGISTRY
    _ACTION_REGISTRY[cls.COMMAND] = cls
    return cls
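
# For example (hypothetical): any new action only needs a COMMAND attribute and
# the decorator, e.g.
#
# @register_action
# class DumpAction(InferenceAction):
#     COMMAND: ClassVar[str] = "dump"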

class InferenceAction(Action):
    @classmethod
    def add_arguments(cls: type, parser: argparse.ArgumentParser):
        super(InferenceAction, cls).add_arguments(parser)
        parser.add_argument("cfg", metavar="<config>", help="Config file")
        parser.add_argument("model", metavar="<model>", help="Model file")
        parser.add_argument("input", metavar="<input>", help="Input data")
        parser.add_argument(
            "--opts",
            help="Modify config options using the command-line 'KEY VALUE' pairs",
            default=[],
            nargs=argparse.REMAINDER,
        )

    @classmethod
    def execute(cls: type, args: argparse.Namespace):
        logger.info(f"Loading config from {args.cfg}")
        opts = []
        cfg = cls.setup_config(args.cfg, args.model, args, opts)
        logger.info(f"Loading model from {args.model}")
        predictor = DefaultPredictor(cfg)

        img = read_image(args.input, format="BGR")  # predictor expects BGR image.
        with torch.no_grad():
            outputs = predictor(img)["instances"]
            cls.execute_on_outputs(args, {"file_name": args.input, "image": img}, outputs, cfg)

    @classmethod
    def setup_config(
        cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str]
    ):
        cfg = get_cfg()
        add_densepose_config(cfg)
        cfg.merge_from_file(config_fpath)
        cfg.merge_from_list(args.opts)
        if opts:
            cfg.merge_from_list(opts)
        cfg.MODEL.WEIGHTS = model_fpath
        cfg.freeze()
        return cfg
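
# Note on merge order in setup_config: the command-line "--opts KEY VALUE" pairs
# are merged first, then the opts list supplied by subclasses (ShowAction below
# pins MODEL.ROI_HEADS.SCORE_THRESH_TEST to 0.8, keeping only detections with
# confidence >= 0.8). For example, adding "--opts MODEL.DEVICE cpu" would run
# inference on CPU (MODEL.DEVICE is a standard detectron2 config key).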

@register_action
class ShowAction(InferenceAction):
    """
    Show action that visualizes selected entries on an image
    """

    COMMAND: ClassVar[str] = "show"

    @classmethod
    def add_parser(cls: type, subparsers: argparse._SubParsersAction):
        parser = subparsers.add_parser(cls.COMMAND, help="Visualize selected entries")
        cls.add_arguments(parser)
        parser.set_defaults(func=cls.execute)

    @classmethod
    def add_arguments(cls: type, parser: argparse.ArgumentParser):
        super(ShowAction, cls).add_arguments(parser)
        parser.add_argument(
            "--output",
            metavar="<image_file>",
            default="outputres.png",
            help="File name to save output to",
        )

    @classmethod
    def setup_config(
        cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str]
    ):
        opts.append("MODEL.ROI_HEADS.SCORE_THRESH_TEST")
        opts.append(str(0.8))
        cfg = super(ShowAction, cls).setup_config(config_fpath, model_fpath, args, opts)
        return cfg

    @classmethod
    def execute_on_outputs(
        cls: type, args: argparse.Namespace, entry: Dict[str, Any], outputs: Instances, cfg: CfgNode
        ):
        import cv2
        import numpy as np

        # vis = DensePoseResultsContourVisualizer(
        #     cfg=cfg,
        #     texture_atlas=None,
        #     texture_atlases_dict=None,
        # )
        vis = DensePoseResultsMplContourVisualizer(
            cfg=cfg,
            texture_atlas=None,
            texture_atlases_dict=None,
        )

        visualizer = CompoundVisualizer([vis])
        extractor = CompoundExtractor(DensePoseResultExtractor())

        image_fpath = entry["file_name"]
        logger.info(f"Processing {image_fpath}")
        image = cv2.cvtColor(entry["image"], cv2.COLOR_BGR2GRAY)
        image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
        data = extractor(outputs)

        image_vis = visualizer.visualize(image, data)
        entry_idx = 1
        out_fname = cls._get_out_fname(entry_idx, args.output)
        out_dir = os.path.dirname(out_fname)
        if len(out_dir) > 0 and not os.path.exists(out_dir):
            os.makedirs(out_dir)
        cv2.imwrite(out_fname, image_vis)
        logger.info(f"Output saved to {out_fname}")

    @classmethod
    def _get_out_fname(cls: type, entry_idx: int, fname_base: str):
        base, ext = os.path.splitext(fname_base)
        return base + ".{0:04d}".format(entry_idx) + ext

class CompoundExtractor(object):
    """
    Extracts data for CompoundVisualizer
    """

    def __init__(self, extractors):
        self.extractors = extractors

    def __call__(self, instances: Instances, select=None):
        data = self.extractors(instances, select)
        return [data]

def create_argument_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(
        description=DOC,
        formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=120),
    )
    parser.set_defaults(func=lambda _: parser.print_help(sys.stdout))
    subparsers = parser.add_subparsers(title="Actions")
    for _, action in _ACTION_REGISTRY.items():
        action.add_parser(subparsers)
    return parser

def main():
    parser = create_argument_parser()
    args = parser.parse_args()
    global logger
    logger = setup_logger(name=LOGGER_NAME)
    logger.setLevel(logging.INFO)
    args.func(args)

if __name__ == "__main__":
    main()
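
For quick experiments it can be convenient to bypass argparse and drive the same pipeline directly. A minimal sketch (using the config, weight, and image paths from the command above; the extractor's return layout matches DensePoseResultsVisualizer.visualize in the helper module below):

import torch
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.engine.defaults import DefaultPredictor
from densepose import add_densepose_config
from densepose.vis.extractor import DensePoseResultExtractor

cfg = get_cfg()
add_densepose_config(cfg)
cfg.merge_from_file("configs/densepose_rcnn_R_50_FPN_WC1M_s1x.yaml")
cfg.MODEL.WEIGHTS = "./weights/R_50_FPN_WC1M_s1x.pkl"
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # same threshold ShowAction pins
cfg.freeze()

predictor = DefaultPredictor(cfg)
img = read_image("./test_pics/gao1.jpg", format="BGR")
with torch.no_grad():
    instances = predictor(img)["instances"]

# (results, boxes_xywh): one DensePoseChartResult per detection + XYWH boxes
results, boxes_xywh = DensePoseResultExtractor()(instances)
print(len(results), None if boxes_xywh is None else boxes_xywh.shape)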


Helper module used by the inference script (densepose_results_funcs.py):

import numpy as np
from typing import List, Optional, Tuple
import cv2
import torch
from dataclasses import dataclass

Image = np.ndarray
Boxes = torch.Tensor

## IUV result container and visualizers
@dataclass
class DensePoseChartResult:
    """
    DensePose results for chart-based methods represented by labels and inner
    coordinates (U, V) of individual charts. Each chart is a 2D manifold
    that has an associated label and is parameterized by two coordinates U and V.
    Both U and V take values in [0, 1].
    Thus the results are represented by two tensors:
    - labels (tensor [H, W] of long): contains estimated label for each pixel of
        the detection bounding box of size (H, W)
    - uv (tensor [2, H, W] of float): contains estimated U and V coordinates
        for each pixel of the detection bounding box of size (H, W)
    """

    labels: torch.Tensor
    uv: torch.Tensor

    def to(self, device: torch.device):
        """
        Transfers all tensors to the given device
        """
        labels = self.labels.to(device)
        uv = self.uv.to(device)
        return DensePoseChartResult(labels=labels, uv=uv)
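
# For intuition, a dummy result and the [3, H, W] uint8 "IUV array" packing used
# in DensePoseResultsVisualizer.visualize below (illustrative values only; real
# labels are DensePose part indices in 0..24, with 0 = background):
#
#   r = DensePoseChartResult(labels=torch.ones(2, 2, dtype=torch.long),
#                            uv=torch.rand(2, 2, 2))
#   iuv = torch.cat((r.labels[None].float(), r.uv * 255.0)).type(torch.uint8)
#   # iuv[0] = part label I, iuv[1] = U * 255, iuv[2] = V * 255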

class DensePoseResultsVisualizer(object):
    def visualize(
        self,
        image_bgr: Image,
        results_and_boxes_xywh: Tuple[Optional[List[DensePoseChartResult]], Optional[Boxes]],
    ) -> Image:
        densepose_result, boxes_xywh = results_and_boxes_xywh
        if densepose_result is None or boxes_xywh is None:
            return image_bgr

        boxes_xywh = boxes_xywh.cpu().numpy()
        context = self.create_visualization_context(image_bgr)
        for i, result in enumerate(densepose_result):
            iuv_array = torch.cat(
                (result.labels[None].type(torch.float32), result.uv * 255.0)
            ).type(torch.uint8)
            self.visualize_iuv_arr(context, iuv_array.cpu().numpy(), boxes_xywh[i])
        image_bgr = self.context_to_image_bgr(context)
        return image_bgr

    def create_visualization_context(self, image_bgr: Image):
        return image_bgr

    def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh) -> None:
        # No-op in the base class; subclasses draw onto the context here.
        pass

    def context_to_image_bgr(self, context):
        return context

    def get_image_bgr_from_context(self, context):
        return context


def _extract_i_from_iuvarr(iuv_arr):
    return iuv_arr[0, :, :]


def _extract_u_from_iuvarr(iuv_arr):
    return iuv_arr[1, :, :]


def _extract_v_from_iuvarr(iuv_arr):
    return iuv_arr[2, :, :]


class DensePoseResultsMplContourVisualizer(DensePoseResultsVisualizer):
    def __init__(self, levels=10, cfg=None, texture_atlas=None, texture_atlases_dict=None, **kwargs):
        # cfg and the texture arguments are accepted for interface compatibility
        # with other visualizers but are unused here; any remaining kwargs are
        # forwarded to plt.contour.
        self.levels = levels
        self.plot_args = kwargs

    def create_visualization_context(self, image_bgr: Image):
        import matplotlib.pyplot as plt
        from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas

        context = {}
        context["image_bgr"] = image_bgr
        dpi = 100
        height_inches = float(image_bgr.shape[0]) / dpi
        width_inches = float(image_bgr.shape[1]) / dpi
        fig = plt.figure(figsize=(width_inches, height_inches), dpi=dpi)
        plt.axes([0, 0, 1, 1])
        plt.axis("off")
        context["fig"] = fig
        canvas = FigureCanvas(fig)
        context["canvas"] = canvas
        extent = (0, image_bgr.shape[1], image_bgr.shape[0], 0)
        plt.imshow(image_bgr[:, :, ::-1], extent=extent)
        return context
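
    # Note: width_inches * dpi and height_inches * dpi reproduce the input
    # image's pixel dimensions, so the Agg canvas rendered in
    # context_to_image_bgr below reshapes back to (h, w, 3) exactly.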

    def context_to_image_bgr(self, context):
        fig = context["fig"]
        w, h = map(int, fig.get_size_inches() * fig.get_dpi())
        canvas = context["canvas"]
        canvas.draw()
        image_1d = np.frombuffer(canvas.tostring_rgb(), dtype="uint8")
        image_rgb = image_1d.reshape(h, w, 3)
        image_bgr = image_rgb[:, :, ::-1].copy()
        return image_bgr
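
    # Note: canvas.tostring_rgb() is deprecated in recent Matplotlib releases.
    # On newer versions an equivalent for the Agg backend (assumption: RGBA
    # buffer output) would be:
    #   image_rgba = np.asarray(canvas.buffer_rgba())  # (h, w, 4)
    #   image_bgr = image_rgba[:, :, 2::-1].copy()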

    def save_left_eye(self, img, iuv_list, I, U, V):
        img_cut = img.copy()
        k = 0
        for i in range(img_cut.shape[0]):
            for j in range(img_cut.shape[1]):
                if I[i,j] == iuv_list[k][0] and U[i,j] == iuv_list[k][1] and V[i,j] == iuv_list[k][2]:
                    cv2.circle(img_cut, (j, i), 1, (0, 0, 255), 0)
                    k += 1
                    if k == len(iuv_list):
                        return img_cut
        return img_cut
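
    # Note: save_left_eye matches iuv_list entries in raster-scan order and
    # relies on exact equality of the I/U/V values. A vectorized variant under
    # the same exact-match assumption (hypothetical sketch):
    #   mask = np.zeros(img.shape[:2], dtype=bool)
    #   for pi, pu, pv in iuv_list:
    #       mask |= (I == pi) & (U == pu) & (V == pv)
    #   img[mask] = (0, 0, 255)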

    def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh: Boxes) -> None:
        import matplotlib.pyplot as plt

        # np.save('iuv_gao1.npy', iuv_arr)
        print('bbox_xywh: ', bbox_xywh)

        u = _extract_u_from_iuvarr(iuv_arr).astype(float) / 255.0
        v = _extract_v_from_iuvarr(iuv_arr).astype(float) / 255.0
        extent = (
            bbox_xywh[0],
            bbox_xywh[0] + bbox_xywh[2],
            bbox_xywh[1],
            bbox_xywh[1] + bbox_xywh[3],
        )
        plt.contour(u, self.levels, extent=extent, **self.plot_args)
        plt.contour(v, self.levels, extent=extent, **self.plot_args)

        # Analyze the IUV map, locate the left-eye pixels, and mark them on the original image
        image_ori = context["image_bgr"]
        I = _extract_i_from_iuvarr(iuv_arr)
        x, y, w, h = int(bbox_xywh[0]), int(bbox_xywh[1]), int(bbox_xywh[2]), int(bbox_xywh[3])
        person_img = image_ori[y:y+h, x:x+w]
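        # left_eye_iuv.npy is a prepared array of (I, U, V) triples for the
        # left-eye region, presumably selected offline from a previously saved
        # IUV array (cf. the commented-out np.save above)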
        iuv_list = np.load('./left_eye_iuv.npy')
        img_cut = self.save_left_eye(person_img, iuv_list, I, u, v)
        cv2.imwrite('./img_left_eye.png', img_cut)


## Other helpers
class CompoundVisualizer(object):
    def __init__(self, visualizers):
        self.visualizers = visualizers

    def visualize(self, image_bgr, data):
        assert len(data) == len(
            self.visualizers
        ), "The number of data entries {} should match the number of visualizers {}".format(
            len(data), len(self.visualizers)
        )
        image = image_bgr
        for i, visualizer in enumerate(self.visualizers):
            image = visualizer.visualize(image, data[i])
        return image

    def __str__(self):
        visualizer_str = ", ".join([str(v) for v in self.visualizers])
        return "Compound Visualizer [{}]".format(visualizer_str)


