blender渲染不同位置的多张影像
创建不同位置的Camera
在不同位置创建相机(参见“相机设置”一节:将当前视角设置为相机位置)。
添加脚本
打开脚本页面输入:
import bpy
from mathutils import Matrix, Vector
#---------------------------------------------------------------
# 3x4 P matrix from Blender camera
#---------------------------------------------------------------
# BKE_camera_sensor_size
def get_sensor_size(sensor_fit, sensor_x, sensor_y):
    """Return the sensor dimension (mm) used for the FOV (mirrors BKE_camera_sensor_size)."""
    return sensor_y if sensor_fit == 'VERTICAL' else sensor_x
# BKE_camera_sensor_fit
def get_sensor_fit(sensor_fit, size_x, size_y):
    """Resolve 'AUTO' fit to 'HORIZONTAL' or 'VERTICAL' (mirrors BKE_camera_sensor_fit)."""
    if sensor_fit != 'AUTO':
        return sensor_fit
    # AUTO fits the sensor to the larger image dimension; ties go horizontal.
    return 'HORIZONTAL' if size_x >= size_y else 'VERTICAL'
# Build intrinsic camera parameters from Blender camera data
#
# See notes on this in
# blender.stackexchange.com/questions/15102/what-is-blenders-camera-projection-matrix-model
# as well as
# https://blender.stackexchange.com/a/120063/3581
def get_calibration_matrix_K_from_blender(camd):
    """Build the 3x3 intrinsic calibration matrix K from Blender camera data.

    Only perspective cameras are supported.
    See https://blender.stackexchange.com/a/120063/3581 for the derivation.

    :param camd: a bpy camera data-block (cam.data)
    :raises ValueError: if the camera is not of type 'PERSP'
    :return: mathutils.Matrix, the 3x3 intrinsic matrix K
    """
    if camd.type != 'PERSP':
        raise ValueError('Non-perspective cameras not supported')

    render = bpy.context.scene.render
    f_in_mm = camd.lens
    scale = render.resolution_percentage / 100
    resolution_x_in_px = scale * render.resolution_x
    resolution_y_in_px = scale * render.resolution_y

    sensor_size_in_mm = get_sensor_size(camd.sensor_fit, camd.sensor_width, camd.sensor_height)
    sensor_fit = get_sensor_fit(
        camd.sensor_fit,
        render.pixel_aspect_x * resolution_x_in_px,
        render.pixel_aspect_y * resolution_y_in_px,
    )
    pixel_aspect_ratio = render.pixel_aspect_y / render.pixel_aspect_x

    # Number of pixels the sensor dimension maps onto along the fitted axis.
    if sensor_fit == 'HORIZONTAL':
        view_fac_in_px = resolution_x_in_px
    else:
        view_fac_in_px = pixel_aspect_ratio * resolution_y_in_px

    pixel_size_mm_per_px = sensor_size_in_mm / f_in_mm / view_fac_in_px
    s_u = 1 / pixel_size_mm_per_px
    s_v = 1 / pixel_size_mm_per_px / pixel_aspect_ratio

    # Principal point: image center offset by the lens shift.
    u_0 = resolution_x_in_px / 2 - camd.shift_x * view_fac_in_px
    v_0 = resolution_y_in_px / 2 + camd.shift_y * view_fac_in_px / pixel_aspect_ratio
    skew = 0  # only use rectangular pixels

    return Matrix(
        ((s_u, skew, u_0),
         (0,   s_v,  v_0),
         (0,   0,    1)))
# Returns camera rotation and translation matrices from Blender.
#
# There are 3 coordinate systems involved:
# 1. The World coordinates: "world"
# - right-handed
# 2. The Blender camera coordinates: "bcam"
# - x is horizontal
# - y is up
# - right-handed: negative z look-at direction
# 3. The desired computer vision camera coordinates: "cv"
# - x is horizontal
# - y is down (to align to the actual pixel coordinates
# used in digital images)
# - right-handed: positive z look-at direction
def get_3x4_RT_matrix_from_blender(cam):
    """Return the 3x4 world-to-camera matrix [R|t] in computer-vision convention.

    Three frames are involved:
      1. world          - right-handed
      2. Blender camera - x right, y up, looks down -z (right-handed)
      3. CV camera      - x right, y down, looks down +z (right-handed),
                          matching pixel coordinates of digital images.

    :param cam: a bpy camera object
    :return: mathutils.Matrix, 3x4 [R|t] mapping world points into the CV frame
    """
    # Basis change from the Blender camera frame to the CV frame:
    # flip the y and z axes.
    R_bcam2cv = Matrix(
        ((1,  0,  0),
         (0, -1,  0),
         (0,  0, -1)))

    # Use matrix_world (not rotation_euler / location) so that constraints
    # and parenting are accounted for. The transpose converts the object
    # rotation into a coordinate-change rotation.
    location, rotation = cam.matrix_world.decompose()[0:2]
    R_world2bcam = rotation.to_matrix().transposed()
    T_world2bcam = -1 * R_world2bcam @ location

    # Compose with the basis change into the CV camera frame.
    R_world2cv = R_bcam2cv @ R_world2bcam
    T_world2cv = R_bcam2cv @ T_world2bcam

    # Stack rotation and translation into a single 3x4 matrix.
    rows = [R_world2cv[i][:] + (T_world2cv[i],) for i in range(3)]
    return Matrix(rows)
def get_3x4_P_matrix_from_blender(cam):
    """Return (P, K, RT) for *cam*, where P = K @ RT is the 3x4 projection matrix."""
    intrinsics = get_calibration_matrix_K_from_blender(cam.data)
    extrinsics = get_3x4_RT_matrix_from_blender(cam)
    return intrinsics @ extrinsics, intrinsics, extrinsics
def writeCamera(fo, K, RT):
    """Write one camera in Strecha-style ``.camera`` text format to *fo*.

    Layout (space-separated rows): 3x3 K, a row of zeros, 3x3 R
    (world-to-CV rotation), the camera center in world coordinates, and
    finally the render resolution in pixels.

    :param fo: a writable text file object
    :param K:  3x3 intrinsic matrix (supports [row][col] indexing)
    :param RT: 3x4 [R|t] extrinsic matrix from get_3x4_RT_matrix_from_blender

    NOTE(fix): the original read a global ``cam`` here for the camera
    location, which made the function silently depend on the caller's
    namespace (a NameError when called from anywhere else). The center is
    now derived from RT itself: since t = -R @ C, we have C = -R^T @ t.
    """
    def _write_row(values):
        # One "a b c\n" line, single space between entries, str() formatting
        # to match the original output.
        fo.write(" ".join(str(v) for v in values) + "\n")

    # K
    for r in range(3):
        _write_row(K[r][c] for c in range(3))
    # Padding row of zeros (Strecha format).
    _write_row((0, 0, 0))
    # R (rotation part of RT)
    for r in range(3):
        _write_row(RT[r][c] for c in range(3))

    # Camera center C in world coordinates: C = -R^T @ t.
    center = [-sum(RT[r][axis] * RT[r][3] for r in range(3)) for axis in range(3)]
    _write_row(center)

    # Render resolution in pixels (after resolution_percentage scaling).
    scene = bpy.context.scene
    scale = scene.render.resolution_percentage / 100
    _write_row((scale * scene.render.resolution_x,
                scale * scene.render.resolution_y))
def exportView(cam, pathDir, name):
    """Export calibration and a rendered image for *cam*.

    Writes ``<pathDir>\\<name>.png.camera`` (Strecha-style camera file) and
    renders ``<pathDir>\\<name>.png`` with the current scene settings.

    :param cam:     a bpy camera object (should already be the active scene camera)
    :param pathDir: output directory; joined with a backslash, so this
                    script assumes a Windows path — TODO confirm
    :param name:    base file name without extension
    """
    P, K, RT = get_3x4_P_matrix_from_blender(cam)

    # Debug sanity check: project a fixed world-space point (a 3D-cursor
    # position used while developing this script) to pixel coordinates.
    V = Vector((0.0161, -1.465, -2.4284))
    print("==== 3D Cursor projection ====")
    pc = P @ V
    pc /= pc[2]
    print("Projected cursor location")
    print(pc)

    # NOTE(fix): the original used open()/close() with work in between, so
    # an exception would leak the file handle; `with` guarantees closure.
    with open(pathDir + "\\" + name + ".png.camera", "w") as fo:
        writeCamera(fo, K, RT)

    # Render and save the image next to the camera file.
    bpy.context.scene.render.filepath = pathDir + "\\" + name + ".png"
    print(bpy.context.scene.render.filepath)
    bpy.ops.render.render(use_viewport=True, write_still=True)
# ----------------------------------------------a------------
if __name__ == "__main__":
    # Export every camera in the scene, numbered 0000, 0001, ...
    out_dir = r"D:\data\simulation\joyful\image"
    index = 0
    for obj in bpy.data.objects:
        print(obj.name)
        if obj.type != 'CAMERA':
            continue
        print(obj.name)
        bpy.context.scene.camera = obj
        cam = obj
        print(cam.data.lens)
        exportView(cam, out_dir, "000" + str(index))
        index = index + 1
这里输出的外参是正确的;但 openMVG 的 Strecha 格式中的 R 是转置的,因此需要修改 openMVG 中的相应代码,不再对 R 做转置。
https://github.com/zju3dv/pvnet-rendering/blob/master/blender/blender_utils.py
https://visp-doc.inria.fr/doxygen/visp-3.4.0/tutorial-tracking-mb-generic-rgbd-Blender.html