OpenPose batch traversal

The script below walks through every image listed in ./bianli/names.txt, runs pose estimation on it with OpenCV's DNN module and the pretrained OpenPose Caffe models (BODY25, COCO, or MPI), and writes the keypoint coordinates and annotated images to ./bianli/output.

from __future__ import division
import cv2
import time
import numpy as np
import matplotlib.pyplot as plt
import os

if not os.path.exists("./bianli/input"):      # create the input/output folders if they do not exist
    os.makedirs("./bianli/input")
if not os.path.exists("./bianli/output"):
    os.makedirs("./bianli/output")

class general_pose_model(object):
    def __init__(self, modelpath, mode="MPI"):   # choose one of the supported models: BODY25, COCO, MPI
        # Keypoints per model:
        #   BODY25: 25 points
        #   COCO:   18 points
        #   MPI:    15 points
        self.inWidth = 368
        self.inHeight = 368
        self.threshold = 0.1
        if mode == "BODY25":
            self.pose_net = self.general_body25_model(modelpath)
        elif mode == "COCO":
            self.pose_net = self.general_coco_model(modelpath)
        elif mode == "MPI":
            self.pose_net = self.get_mpi_model(modelpath)

    def get_mpi_model(self, modelpath):
        self.points_name = {                           # keypoint index -> joint name
            "Head": 0, "Neck": 1,
            "RShoulder": 2, "RElbow": 3, "RWrist": 4,
            "LShoulder": 5, "LElbow": 6, "LWrist": 7,
            "RHip": 8, "RKnee": 9, "RAnkle": 10,
            "LHip": 11, "LKnee": 12, "LAnkle": 13,
            "Chest": 14, "Background": 15}
        self.num_points = 15                           # number of keypoints to detect
        self.point_pairs = [[0, 1], [1, 2], [2, 3],    # keypoint pairs connected when drawing the skeleton
                            [3, 4], [1, 5], [5, 6],
                            [6, 7], [1, 14], [14, 8],
                            [8, 9], [9, 10], [14, 11],
                            [11, 12], [12, 13]]
        # Load the pretrained model. The second argument is an absolute path, so
        # os.path.join effectively ignores modelpath; raw strings avoid backslash-escape issues.
        prototxt = os.path.join(
            modelpath,
            r"D:\opencv\opencv\sources\samples\dnn\mpi\pose_deploy_linevec.prototxt")
        caffemodel = os.path.join(
            modelpath,
            r"D:\opencv\opencv\sources\samples\dnn\mpi\pose_iter_160000.caffemodel")
        mpi_model = cv2.dnn.readNetFromCaffe(prototxt, caffemodel)

        return mpi_model


    def general_coco_model(self, modelpath):
        self.points_name = {
            "Nose": 0, "Neck": 1,
            "RShoulder": 2, "RElbow": 3, "RWrist": 4,
            "LShoulder": 5, "LElbow": 6, "LWrist": 7,
            "RHip": 8, "RKnee": 9, "RAnkle": 10,
            "LHip": 11, "LKnee": 12, "LAnkle": 13,
            "REye": 14, "LEye": 15,
            "REar": 16, "LEar": 17,
            "Background": 18}
        self.num_points = 18
        self.point_pairs = [[1, 0], [1, 2], [1, 5],
                            [2, 3], [3, 4], [5, 6],
                            [6, 7], [1, 8], [8, 9],
                            [9, 10], [1, 11], [11, 12],
                            [12, 13], [0, 14], [0, 15],
                            [14, 16], [15, 17]]
        prototxt = os.path.join(
            modelpath,
            r"D:\opencv\opencv\sources\samples\dnn\coco\pose_deploy_linevec.prototxt")
        caffemodel = os.path.join(
            modelpath,
            r"D:\opencv\opencv\sources\samples\dnn\coco\pose_iter_440000.caffemodel")
        coco_model = cv2.dnn.readNetFromCaffe(prototxt, caffemodel)

        return coco_model


    def general_body25_model(self, modelpath):
        self.num_points = 25
        self.point_pairs = [[1, 0], [1, 2], [1, 5],
                            [2, 3], [3, 4], [5, 6],
                            [6, 7], [0, 15], [15, 17],
                            [0, 16], [16, 18], [1, 8],
                            [8, 9], [9, 10], [10, 11],
                            [11, 22], [22, 23], [11, 24],
                            [8, 12], [12, 13], [13, 14],
                            [14, 19], [19, 20], [14, 21]]
        prototxt = os.path.join(
            modelpath,
            "C:\\Users\\lenovo\\Desktop\\bishe\\myopenpose\\models\\pose\\body_25\\pose_deploy.prototxt")
        caffemodel = os.path.join(
            modelpath,
            "C:\\Users\\lenovo\\Desktop\\OpenPose_models\\pose\\body_25\\pose_iter_584000.caffemodel")
        body25_model = cv2.dnn.readNetFromCaffe(prototxt, caffemodel)

        return body25_model


    def predict(self, imgfile):
        img_cv2 = cv2.imread(imgfile)
        img_height, img_width, _ = img_cv2.shape
        inpBlob = cv2.dnn.blobFromImage(img_cv2,
                                        1.0 / 255,
                                        (self.inWidth, self.inHeight),
                                        (0, 0, 0),
                                        swapRB=False,
                                        crop=False)
        self.pose_net.setInput(inpBlob)
        self.pose_net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
        self.pose_net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

        output = self.pose_net.forward()   # shape (1, C, H, W): one heatmap per channel

        H = output.shape[2]
        W = output.shape[3]
        print(output.shape)

        # vis heatmaps
        # self.vis_heatmaps(imgfile, output)
        #
        points = []      # list of detected keypoint coordinates
        for idx in range(self.num_points):
            probMap = output[0, idx, :, :]   # confidence map for keypoint idx

            # Find the global maximum of the confidence map.
            minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)

            # Scale the point back to the original image size.
            x = (img_width * point[0]) / W
            y = (img_height * point[1]) / H

            if prob > self.threshold:
                points.append((int(x), int(y)))
            else:
                points.append(None)
        print(points)                        # print the detected keypoint coordinates
        print(points, file=f)                # and append them to the result file f (opened in the main block)
        return points


    """ def vis_heatmaps(self, imgfile, net_outputs):
        img_cv2 = cv2.imread(imgfile)
        plt.figure(num='heatmaps', figsize=[10, 10])
        for pdx in range(self.num_points):
            probMap = net_outputs[0, pdx, :, :]
            probMap = cv2.resize(
                probMap,
                (img_cv2.shape[1], img_cv2.shape[0])
            )
            plt.subplot(5, 5, pdx + 1)                 # 5x5 grid of subplots; pdx+1 selects the current cell
            plt.imshow(cv2.cvtColor(img_cv2, cv2.COLOR_BGR2RGB))
            plt.imshow(probMap, alpha=0.6)
            plt.colorbar()
            plt.axis("off")                            # hide the axes
        # Save the per-keypoint probability maps (save before show, otherwise the saved image is blank).
        plt.savefig("D:/openpose/bianli/output/" + image_id + "_heatmap.jpg")
        plt.show() """
    
    def vis_pose(self, imgfile, points):
        img_cv2 = cv2.imread(imgfile)
        img_cv2_copy = np.copy(img_cv2)
        for idx in range(len(points)):
            if points[idx]:
                cv2.circle(img_cv2_copy,             # image to draw on
                           points[idx],              # circle center (detected keypoint position)
                           8,                        # radius
                           (0, 255, 255),            # color
                           thickness=-1,             # negative thickness draws a filled circle
                           lineType=cv2.FILLED)      # boundary type
                cv2.putText(img_cv2_copy,                # image to annotate
                            "{}".format(idx),            # text: the keypoint index
                            points[idx],                 # text position (keypoint position)
                            cv2.FONT_HERSHEY_SIMPLEX,    # font
                            1,                           # font scale
                            (0, 0, 255),                 # font color
                            2,                           # line thickness
                            lineType=cv2.LINE_AA)
        # cv2.imshow('point', img_cv2_copy)
        # cv2.waitKey(0)
        cv2.imwrite("./bianli/output/" + image_id + "_pose.jpg", img_cv2_copy)       # save the keypoint image (image_id is set in the main loop)

        # Draw the skeleton
        for pair in self.point_pairs:
            partA = pair[0]
            partB = pair[1]

            if points[partA] and points[partB]:
                cv2.line(img_cv2,
                         points[partA],
                         points[partB],
                         (0, 255, 255), 3)
                cv2.circle(img_cv2,
                           points[partA],
                           8,
                           (0, 0, 255),
                           thickness=-1,
                           lineType=cv2.FILLED)
        # cv2.imshow('skeleton', img_cv2)
        # cv2.waitKey(0)
        cv2.imwrite("./bianli/output/" + image_id + "_skeleton.jpg", img_cv2)        # save the skeleton image

        """ plt.figure(num='result', figsize=[10, 10])
        plt.subplot(1, 2, 1)
        plt.imshow(cv2.cvtColor(img_cv2, cv2.COLOR_BGR2RGB))
        plt.axis("off")
        plt.subplot(1, 2, 2)
        plt.imshow(cv2.cvtColor(img_cv2_copy, cv2.COLOR_BGR2RGB))
        plt.axis("off")
        plt.savefig("D:/openpose/bianli/output/" + image_id + "_result.jpg")
        plt.show() """

    def handup(self, imgfile, point):
        img_cv2 = cv2.imread(imgfile)
        # print("right wrist y-coordinate:", point[4][1])
        # print("neck y-coordinate:", point[1][1])

        # MPI indices: 4 = RWrist, 1 = Neck. A wrist above the neck (smaller y) counts as a raised hand.
        if point[4] and point[1] and point[4][1] < point[1][1]:
            cv2.putText(img_cv2, 'HANDS UP!', (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            cv2.imwrite("./bianli/output/" + image_id + "_pose.jpg", img_cv2)       # save the annotated image


    def duanzheng(self, imgfile, point):   # "duanzheng" = sitting upright
        img_cv2 = cv2.imread(imgfile)

        # MPI indices: 2/5 = R/L shoulder, 3/6 = R/L elbow, 4 = R wrist. The posture counts as
        # upright when shoulders and elbows are roughly level and the right wrist is roughly level
        # with both elbows (all within 100 px); missing keypoints (None) are guarded against.
        if (point[2] and point[5] and point[3] and point[6] and point[4]
                and abs(point[2][1] - point[5][1]) < 100
                and abs(point[3][1] - point[6][1]) < 100
                and abs(point[4][1] - point[3][1]) < 100
                and abs(point[4][1] - point[6][1]) < 100):
            cv2.putText(img_cv2, 'sitting upright!', (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            cv2.imwrite("./bianli/output/" + image_id + "_pose.jpg", img_cv2)       # save the annotated image

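The posture checks above hard-code the MPI keypoint indices (4 = RWrist, 1 = Neck, and so on). Since get_mpi_model already builds a points_name mapping, the same logic can be expressed against names; a minimal sketch of that idea (is_hand_up is a hypothetical helper, not part of the original script):

# Hypothetical standalone helper: resolve keypoints through the points_name
# mapping instead of hard-coded indices.
def is_hand_up(points, points_name):
    rwrist = points[points_name["RWrist"]]   # MPI index 4
    neck = points[points_name["Neck"]]       # MPI index 1
    # smaller y means higher in the image, so wrist above neck = hand raised
    return bool(rwrist and neck and rwrist[1] < neck[1])

After predict has run, it could be called from the main loop as is_hand_up(res_points, pose_model.points_name).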
if __name__ == '__main__':
    # names.txt lists the images to process, one name per line, without the file extension.
    image_ids = open('./bianli/names.txt').read().strip().split()

    f = open('./bianli/result.txt', 'a')                           # result.txt collects the keypoint coordinates
    for image_id in image_ids:

        img_file = "./bianli/input/" + image_id + ".jpg"           # image to process in this iteration
        # img = cv2.imread(img_file)
        # cv2.imshow('img', img)
        # cv2.waitKey(0)

        modelpath = "./myopenpose/models/pose/"
        pose_model = general_pose_model(modelpath, mode="MPI")     # note: the network is reloaded for every image
        res_points = pose_model.predict(img_file)
        pose_model.vis_pose(img_file, res_points)

        pose_model.handup(img_file, res_points)

        print(image_id, '-----------------------------')
        print(image_id, '-----------------------------', file=f, flush=True)
    f.close()
    print("Conversion completed!")
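The driver loop is hard-wired to the MPI model. Switching to one of the other two models only needs a different mode argument, assuming the prototxt/caffemodel paths inside the corresponding loader point at files that actually exist on your machine:

pose_model = general_pose_model(modelpath, mode="COCO")     # or mode="BODY25"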
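Because names.txt has to list every image name without its extension, it is easy to let it drift out of sync with the input folder. A small helper like the following regenerates it from whatever .jpg files are in ./bianli/input (a convenience sketch, not part of the original workflow):

import os

input_dir = "./bianli/input"
with open("./bianli/names.txt", "w") as names_file:
    for filename in sorted(os.listdir(input_dir)):
        stem, ext = os.path.splitext(filename)
        if ext.lower() == ".jpg":            # the main loop only looks for .jpg files
            names_file.write(stem + "\n")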