smpl_layer 代码

目录

骨骼点位置:

骨骼顶点

smpl_layer.py

smpl_layer注释版:


骨骼点位置:

# Excerpt from SMPL_Layer.forward: split the [bs,53,3] axis-angle pose tensor
# into the keyword arguments expected by the SMPL-X layer.
kwargs_pose['global_orient'] = self.bm_x.global_orient.repeat(bs,1)  # layer default; global rotation is applied manually later
kwargs_pose['body_pose'] = pose[:,1:22].flatten(1)  # joints 1..21 -> [bs,63]
kwargs_pose['left_hand_pose'] = pose[:,22:37].flatten(1)  # joints 22..36 -> [bs,45]
kwargs_pose['right_hand_pose'] = pose[:,37:52].flatten(1)  # joints 37..51 -> [bs,45]
kwargs_pose['jaw_pose'] = pose[:,52:53].flatten(1)# jaw pose (joint 52)

# Use the predicted facial expression when available, otherwise the model default.
if expression is not None:
    kwargs_pose['expression'] = expression.flatten(1) # [bs,10]
else:
    kwargs_pose['expression'] = self.bm_x.expression.repeat(bs,1)

# default - to be generalized (eye poses are not predicted; model defaults are used)
kwargs_pose['leye_pose'] = self.bm_x.leye_pose.repeat(bs,1)
kwargs_pose['reye_pose'] = self.bm_x.reye_pose.repeat(bs,1)   

骨骼顶点

# Excerpt from SMPL_Layer.forward: read mesh vertices and joints from the SMPL-X output.
verts = output.vertices  # posed mesh vertices
j3d = output.joints # 45 joints

smpl_layer.py

# Multi-HMR
# Copyright (c) 2024-present NAVER Corp.
# CC BY-NC-SA 4.0 license

import torch
from torch import nn
from torch import nn
import smplx
import torch
import numpy as np
import utils
from utils import inverse_perspective_projection, perspective_projection
import roma
import pickle
import os
from utils.constants import SMPLX_DIR

class SMPL_Layer(nn.Module):
    """
    Extension of the SMPL-X layer with camera information used to
    (inverse-)project 3D points onto the camera plane.
    """
    def __init__(self, 
                 type='smplx', 
                 gender='neutral', 
                 num_betas=10,
                 kid=False,
                 person_center=None,
                 *args, 
                 **kwargs,
                 ):
        """
        Args:
            - type: body-model family; only 'smplx' is supported.
            - gender: SMPL-X model gender ('neutral', 'male', 'female').
            - num_betas: number of shape coefficients.
            - kid: stored flag (not used elsewhere in this class).
            - person_center: optional joint name used as the translation origin
              in forward(); None keeps the vanilla pelvis-based convention.
        """
        super().__init__()

        # Args
        assert type == 'smplx', f"unsupported body model type: {type!r}"
        self.type = type
        self.kid = kid
        self.num_betas = num_betas
        # flat_hand_mean=True: hand rest pose is flat instead of the SMPL-X mean pose.
        self.bm_x = smplx.create(SMPLX_DIR, 'smplx', gender=gender, use_pca=False, flat_hand_mean=True, num_betas=num_betas)

        # Primary keypoint - root.
        # getattr replaces the original eval() for the dynamic function lookup:
        # same behavior, no arbitrary-code-execution surface.
        self.joint_names = getattr(utils, f"get_{self.type}_joint_names")()
        self.person_center = person_center
        self.person_center_idx = None
        if self.person_center is not None:
            self.person_center_idx = self.joint_names.index(self.person_center)

    def forward(self,
                pose, shape,
                loc, dist, transl,
                K,
                expression=None, # facial expression
                ):
        """
        Args:
            - pose: pose in axis-angle - torch.Tensor [bs,53,3] for SMPL-X
              (root orient + 21 body + 15 left-hand + 15 right-hand + 1 jaw joints)
            - shape: shape coefficients - torch.Tensor [bs,num_betas] (or num_betas+1)
            - loc: 2D pixel location of the primary keypoint - torch.Tensor [bs,2]
              (may be None when transl is given)
            - dist: distance of the primary keypoint from the camera in m -
              torch.Tensor [bs,1] (may be None when transl is given)
            - transl: optional translation [bs,3]; computed from (loc, dist, K) when None
            - K: camera intrinsics
            - expression: optional facial-expression coefficients [bs,10]
        Return:
            - dict containing a bunch of useful information about each person
              (3D/2D vertices and joints, translations); empty dict when bs == 0
        Raises:
            - NameError: if self.type is neither 'smpl' nor 'smplx'
        """
        # Shape sanity checks.
        if loc is not None and dist is not None:
            assert pose.shape[0] == shape.shape[0] == loc.shape[0] == dist.shape[0]
        if self.type == 'smpl':
            assert len(pose.shape) == 3 and list(pose.shape[1:]) == [24, 3]
        elif self.type == 'smplx':
            # taking root_orient, body_pose, lhand, rhand and jaw for the moment
            assert len(pose.shape) == 3 and list(pose.shape[1:]) == [53, 3]
        else:
            raise NameError(f"unknown body model type: {self.type}")
        assert len(shape.shape) == 2 and (list(shape.shape[1:]) == [self.num_betas] or list(shape.shape[1:]) == [self.num_betas + 1])
        if loc is not None and dist is not None:
            assert len(loc.shape) == 2 and list(loc.shape[1:]) == [2]
            assert len(dist.shape) == 2 and list(dist.shape[1:]) == [1]

        bs = pose.shape[0]

        # No humans - nothing to compute.
        if bs == 0:
            return {}

        # Low dimensional parameters for the SMPL-X layer.
        # global_orient is left at the layer default because the global rotation
        # (pose[:,0]) is applied manually below, around the pelvis.
        kwargs_pose = {
            'betas': shape,
            'global_orient': self.bm_x.global_orient.repeat(bs, 1),
            'body_pose': pose[:, 1:22].flatten(1),
            'left_hand_pose': pose[:, 22:37].flatten(1),
            'right_hand_pose': pose[:, 37:52].flatten(1),
            'jaw_pose': pose[:, 52:53].flatten(1),
        }

        if expression is not None:
            kwargs_pose['expression'] = expression.flatten(1)  # [bs,10]
        else:
            kwargs_pose['expression'] = self.bm_x.expression.repeat(bs, 1)

        # default - to be generalized (eye poses are not predicted)
        kwargs_pose['leye_pose'] = self.bm_x.leye_pose.repeat(bs, 1)
        kwargs_pose['reye_pose'] = self.bm_x.reye_pose.repeat(bs, 1)

        # Forward using the parametric 3d model SMPL-X layer.
        output = self.bm_x(**kwargs_pose)
        verts = output.vertices
        j3d = output.joints  # 45 joints
        R = roma.rotvec_to_rotmat(pose[:, 0])

        # Apply the global orientation on the 3D joints and vertices,
        # rotating around the pelvis.
        pelvis = j3d[:, [0]]
        j3d = (R.unsqueeze(1) @ (j3d - pelvis).unsqueeze(-1)).squeeze(-1)
        verts = (R.unsqueeze(1) @ (verts - pelvis).unsqueeze(-1)).squeeze(-1)

        # Location of the person in 3D: lift the 2D location with its distance.
        if transl is None:
            if K.dtype == torch.float16:
                # torch.inverse does not work with float16 at the moment,
                # so run the inverse projection in float32 and cast back.
                transl = inverse_perspective_projection(loc.unsqueeze(1).float(), K.float(), dist.unsqueeze(1).float())[:, 0]
                transl = transl.half()
            else:
                transl = inverse_perspective_projection(loc.unsqueeze(1), K, dist.unsqueeze(1))[:, 0]

        # Updating transl if we choose a certain person center.
        transl_up = transl.clone()

        # Definition of the translation depends on the args:
        # 1) vanilla SMPL - 2) computed from a given joint.
        if self.person_center_idx is None:
            # Add pelvis to transl - standard way for the SMPL-X layer.
            transl_up = transl_up + pelvis[:, 0]
        else:
            # Center around the joint because the translation is computed from this joint.
            person_center = j3d[:, [self.person_center_idx]]
            verts = verts - person_center
            j3d = j3d - person_center

        # Moving into the camera coordinate system.
        j3d_cam = j3d + transl_up.unsqueeze(1)
        verts_cam = verts + transl_up.unsqueeze(1)

        # Projection onto the camera plane.
        j2d = perspective_projection(j3d_cam, K)
        v2d = perspective_projection(verts_cam, K)

        return {
            'v3d': verts_cam,  # in 3d camera space
            'j3d': j3d_cam,  # in 3d camera space
            'j2d': j2d,
            'v2d': v2d,
            'transl': transl,  # translation of the primary keypoint
            'transl_pelvis': j3d_cam[:, [0]],  # root=pelvis
        }

smpl_layer注释版:

import torch
import torch.nn as nn

import roma
import smplx

import utils
from utils import inverse_perspective_projection, perspective_projection
from utils.constants import SMPLX_DIR

class SMPL_Layer(nn.Module):
    """
    Annotated copy of the SMPL-X layer extension: carries camera information
    and (inverse-)projects 3D points onto the camera plane.
    """
    def __init__(self, 
                 type='smplx', 
                 gender='neutral', 
                 num_betas=10,
                 kid=False,
                 person_center=None,
                 *args, 
                 **kwargs):
        """
        Args:
            - type: body-model family; only 'smplx' is supported.
            - gender: SMPL-X model gender.
            - num_betas: number of shape coefficients.
            - kid: stored flag (not used elsewhere in this class).
            - person_center: optional joint name used as the translation origin.
        """
        super().__init__()

        # Initialize attributes.
        assert type == 'smplx', f"unsupported body model type: {type!r}"  # only 'smplx' supported
        self.type = type
        self.kid = kid
        self.num_betas = num_betas
        
        # Create the SMPL-X parametric body model.
        self.bm_x = smplx.create(SMPLX_DIR, 'smplx', gender=gender, use_pca=False, flat_hand_mean=True, num_betas=num_betas)

        # Resolve the SMPL-X joint names; getattr replaces the original eval()
        # lookup (same behavior, no code-execution surface).
        self.joint_names = getattr(utils, f"get_{self.type}_joint_names")()
        self.person_center = person_center
        self.person_center_idx = None
        if self.person_center is not None:
            self.person_center_idx = self.joint_names.index(self.person_center)

    def forward(self, pose, shape, loc, dist, transl, K, expression=None):
        """
        Args:
            - pose: pose in axis-angle - torch.Tensor [bs, 53, 3] (for SMPL-X)
            - shape: shape parameters - torch.Tensor [bs, num_betas]
            - loc: 2D pixel location of the pelvis - torch.Tensor [bs, 2]
            - dist: pelvis-to-camera distance in meters - torch.Tensor [bs, 1]
            - transl: translation vector - torch.Tensor [bs, 3] (computed when None)
            - K: camera intrinsics matrix
            - expression: facial expression parameters - torch.Tensor [bs, 10] (optional)
        Return:
            - dict containing useful per-person information; empty when bs == 0
        Raises:
            - NameError: if self.type is neither 'smpl' nor 'smplx'
        """
        
        # Validate batch sizes and tensor shapes.
        if loc is not None and dist is not None:
            assert pose.shape[0] == shape.shape[0] == loc.shape[0] == dist.shape[0]
        if self.type == 'smpl':
            assert len(pose.shape) == 3 and list(pose.shape[1:]) == [24, 3]
        elif self.type == 'smplx':
            assert len(pose.shape) == 3 and list(pose.shape[1:]) == [53, 3]  # SMPL-X pose layout
        else:
            raise NameError(f"unknown body model type: {self.type}")
        
        assert len(shape.shape) == 2 and (list(shape.shape[1:]) == [self.num_betas] or list(shape.shape[1:]) == [self.num_betas + 1])
        if loc is not None and dist is not None:
            assert len(loc.shape) == 2 and list(loc.shape[1:]) == [2]
            assert len(dist.shape) == 2 and list(dist.shape[1:]) == [1]

        bs = pose.shape[0]  # batch size

        # No people: return an empty dict.
        if bs == 0:
            return {}
        
        # Build the low-dimensional parameters for the SMPL-X model.
        # global_orient stays at the layer default; the global rotation
        # (pose[:, 0]) is applied manually further down.
        kwargs_pose = {
            'betas': shape,
        }
        kwargs_pose['global_orient'] = self.bm_x.global_orient.repeat(bs, 1)
        kwargs_pose['body_pose'] = pose[:, 1:22].flatten(1)  # body pose
        kwargs_pose['left_hand_pose'] = pose[:, 22:37].flatten(1)  # left-hand pose
        kwargs_pose['right_hand_pose'] = pose[:, 37:52].flatten(1)  # right-hand pose
        kwargs_pose['jaw_pose'] = pose[:, 52:53].flatten(1)  # jaw pose

        # Facial expression: use the prediction if given, else the model default.
        if expression is not None:
            kwargs_pose['expression'] = expression.flatten(1)  # facial expression [bs,10]
        else:
            kwargs_pose['expression'] = self.bm_x.expression.repeat(bs, 1)

        # Default eye poses (not predicted).
        kwargs_pose['leye_pose'] = self.bm_x.leye_pose.repeat(bs, 1)
        kwargs_pose['reye_pose'] = self.bm_x.reye_pose.repeat(bs, 1)
        
        # Forward pass through the SMPL-X model.
        output = self.bm_x(**kwargs_pose)
        verts = output.vertices  # mesh vertices
        j3d = output.joints  # 3D joints
        
        # Apply the global rotation to the 3D joints, rotating around the pelvis.
        R = roma.rotvec_to_rotmat(pose[:, 0])  # rotation vector -> rotation matrix
        pelvis = j3d[:, [0]]  # pelvis joint
        j3d = (R.unsqueeze(1) @ (j3d - pelvis).unsqueeze(-1)).squeeze(-1)
        
        # Apply the global rotation to the vertices.
        verts = (R.unsqueeze(1) @ (verts - pelvis).unsqueeze(-1)).squeeze(-1)

        # If no translation was provided, compute it by inverse projection.
        if transl is None:
            if K.dtype == torch.float16:
                # torch.inverse does not support float16; compute in float32, cast back.
                transl = inverse_perspective_projection(loc.unsqueeze(1).float(), K.float(), dist.unsqueeze(1).float())[:, 0]
                transl = transl.half()
            else:
                transl = inverse_perspective_projection(loc.unsqueeze(1), K, dist.unsqueeze(1))[:, 0]

        # Update the translation according to the chosen person center.
        transl_up = transl.clone()
        if self.person_center_idx is None:
            transl_up = transl_up + pelvis[:, 0]  # vanilla SMPL-X convention: add pelvis
        else:
            # Re-center on the chosen joint (the translation refers to it).
            person_center = j3d[:, [self.person_center_idx]]
            verts = verts - person_center
            j3d = j3d - person_center

        # Move joints and vertices into the camera coordinate system.
        j3d_cam = j3d + transl_up.unsqueeze(1)
        verts_cam = verts + transl_up.unsqueeze(1)

        # Project the 3D joints and vertices onto the 2D image plane.
        j2d = perspective_projection(j3d_cam, K)
        v2d = perspective_projection(verts_cam, K)

        # Collect the outputs.
        return {
            'v3d': verts_cam,  # vertices in 3D camera space
            'j3d': j3d_cam,  # joints in 3D camera space
            'j2d': j2d,  # joints projected to the image plane
            'v2d': v2d,  # vertices projected to the image plane
            'transl': transl,  # translation vector
            'transl_pelvis': j3d_cam[:, [0]],  # pelvis location in camera space
        }

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

AI算法网奇

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值