代码解析
- 转载请注明出处
1. 载入数据, 归一化
# Load the example data and normalize.
C = sio.loadmat('Data/example1.mat')
colors = C['colors']
colors = colors / np.max(colors)
# Max-normalization: scale per-vertex colors into [0, 1].
# NOTE(review): `vertices` is used below but never assigned in this excerpt —
# presumably loaded from C (e.g. C['vertices']) earlier; confirm against the full script.
vertices = vertices - np.mean(vertices, 0)[np.newaxis, :]
# Center the mesh at the origin: np.mean(vertices, 0) has shape (3,)
# because the mean is taken over all rows, i.e. over the x, y, z columns.
2. 设置一些参数
'''
现实中一般脸是18cm长宽, 这里设置180=18cm, 图片尺寸256×256
'''
In reality a face is roughly 18 cm tall/wide; here 180 units correspond to 18 cm, and the output image is 256 x 256 pixels.
# Map the mesh's vertical extent to 180 output pixels (180 px = 18 cm, i.e. ~10 px/cm),
# so the rendered face fills most of the 256 x 256 image.
scale_init = 180 / (np.max(vertices[:, 1]) - np.min(vertices[:, 1]))
# scale_init is the ratio of 180 to the range of the y coordinates (face height).
# Start with orthographic projection.
camera['proj_type'] = 'orthographic'
3. 参数变换带来的影响
scale参数的变化影响
# Sweep the object scale from 0.5x to 1.1x of scale_init and render each setting.
for scale_factor in np.arange(0.5, 1.2, 0.1):
    obj['s'] = scale_init * scale_factor
    image = transform_test(vertices, obj, camera)
- 示例图:
angle参数变化影响
# For each rotation axis (index 0, 1, 2), sweep the angle from -50 to 50 degrees.
# NOTE(review): the angle set for one axis is not reset before sweeping the next
# axis, so later sweeps keep the last angle of the previous one — confirm intended.
for axis in range(3):
    for deg in np.arange(-50, 51, 10):
        obj['angles'][axis] = deg
        image = transform_test(vertices, obj, camera)
- 示例图: 绕x轴旋转
- 示例图: 绕y轴旋转
- 示例图: 绕z轴旋转
投影变换下eye位置的影响
# Switch to perspective projection and fix the camera parameters.
camera['proj_type'] = 'perspective'
camera['at'] = [0, 0, 0]      # look-at target: the (centered) face
camera['near'] = 1000
camera['far'] = -100          # only points between near and far are projected
camera['fovy'] = 30
camera['up'] = [0, 1, 0]
# NOTE(review): near > far here looks inverted w.r.t. the usual convention
# (near < far); confirm against mesh.transform.perspective_project's expectations.
# Move the camera straight toward the face, from far to near.
for eye_z in np.arange(500, 250 - 1, -40):  # 0.5m -> 0.25m
    camera['eye'] = [0, 0, eye_z]  # stay on the z axis, directly in front of the face
    image = transform_test(vertices, obj, camera)
- 这里的transform_test :
def transform_test(vertices, obj, camera, h=256, w=256):
    """Transform the mesh with the given object/camera settings and render it.

    Args:
        vertices: [nver, 3] mesh vertices.
        obj: dict with 's' (scale), 'angles' (rotation angles) and 't' (translation).
        camera: dict with 'proj_type' ('orthographic' or 'perspective') and, for
            perspective, 'eye'/'at'/'up'/'fovy'/'near'/'far'.
        h, w: output image height and width in pixels.

    Returns:
        [h, w, 3] rendered image with values clipped to [0, 1].
    """
    # Euclidean similarity transform (scale + rotation + translation) in world space.
    rot = mesh.transform.angle2matrix(obj['angles'])
    world_vertices = mesh.transform.similarity_transform(
        vertices, obj['s'], rot, obj['t'])

    if camera['proj_type'] == 'orthographic':
        # Orthographic: world coordinates map straight to image coordinates.
        image_vertices = mesh.transform.to_image(world_vertices, h, w)
    else:
        # World space -> camera space: look-at transform driven by camera['eye'].
        cam_vertices = mesh.transform.lookat_camera(
            world_vertices, camera['eye'], camera['at'], camera['up'])
        # Camera space -> projected space (perspective projection).
        ndc_vertices = mesh.transform.perspective_project(
            cam_vertices, camera['fovy'], near=camera['near'], far=camera['far'])
        # Projected space -> image coordinates (position in the image).
        image_vertices = mesh.transform.to_image(ndc_vertices, h, w, True)

    # Rasterize with per-vertex colors; `triangles` and `colors` come from module scope.
    rendering = mesh.render.render_colors(image_vertices, triangles, colors, h, w)
    return np.minimum(np.maximum(rendering, 0), 1)
- 下面是投影变换, 使用了从视锥到正方体(规范立方体/NDC)的变换; 视口变换不在这个函数里, 而是由后续的 to_image 把NDC映射到图像坐标. 参见透视投影原理介绍.
def perspective_project(vertices, fovy, aspect_ratio = 1., near = 0.1, far = 1000.):
    ''' Perspective projection: camera space -> normalized device coordinates.

    Maps the view frustum to the canonical cube, following the OpenGL
    (gluPerspective) convention. The viewport transform is not done here.

    Args:
        vertices: [nver, 3] vertices in camera space (camera looks down -z).
        fovy: vertical angular field of view, in degrees (the FULL angle).
        aspect_ratio : width / height of field of view
        near : depth of near clipping plane (positive distance)
        far : depth of far clipping plane (positive distance)
    Returns:
        projected_vertices: [nver, 3]
    '''
    fovy = np.deg2rad(fovy)
    # Frustum extent at the near plane. The full vertical FOV spans [-top, top],
    # so the half-angle is fovy/2.
    # FIX: the original used np.tan(fovy), which treats fovy as the half-angle
    # and contradicts the docstring / gluPerspective convention.
    top = near * np.tan(fovy / 2)
    bottom = -top
    right = top * aspect_ratio
    left = -right

    # Standard OpenGL perspective matrix (frustum -> canonical cube),
    # expressed in homogeneous coordinates.
    P = np.array([[near/right, 0,        0,                      0],
                  [0,          near/top, 0,                      0],
                  [0,          0,        -(far+near)/(far-near), -2*far*near/(far-near)],
                  [0,          0,        -1,                     0]])
    vertices_homo = np.hstack((vertices, np.ones((vertices.shape[0], 1))))  # [nver, 4]
    projected_vertices = vertices_homo.dot(P.T)
    # Perspective divide: normalize so the 4th (w) homogeneous component is 1.
    projected_vertices = projected_vertices / projected_vertices[:, 3:]
    projected_vertices = projected_vertices[:, :3]
    # Flip the z axis (the original comment said "y reversed", but the code
    # flips z) — presumably so larger z means closer to the viewer for the
    # downstream depth test; confirm against render_colors.
    projected_vertices[:, 2] = -projected_vertices[:, 2]
    return projected_vertices