Blender rendering code

This post shows how to use Blender's Python API to import a 3D model and render it, saving the RGB images, camera poses, and camera intrinsics. Viewpoints are sampled uniformly on a sphere to produce images from different angles. A second script renders depth maps, remapping the depth range to suit the distances in the scene. Finally, a batch-renaming script renumbers the images sequentially.


Reference code: https://github.com/vsitzmann/shapenet_renderer
Paste the script directly into Blender's text editor and run it there (or run it headless with blender --background --python your_script.py). Note that it uses the Blender 2.7x / Blender Internal API (bpy.data.lamps, alpha_mode, horizon_color), so run it in Blender 2.79 rather than 2.8+.
Edit the 3D model path in the code to point to your own file.

1 Render and save RGB images, poses, and camera intrinsics

import bpy
from mathutils import Matrix, Vector
import os
import numpy as np
import math


def normalize(vec):
    return vec / (np.linalg.norm(vec, axis=-1, keepdims=True) + 1e-9)


def look_at(cam_location, point):
    # Cam points in positive z direction
    forward = point - cam_location
    forward = normalize(forward)

    tmp = np.array([0., -1., 0.])

    right = np.cross(tmp, forward)
    right = normalize(right)

    up = np.cross(forward, right)
    up = normalize(up)

    mat = np.stack((right, up, forward, cam_location), axis=-1)

    hom_vec = np.array([[0., 0., 0., 1.]])

    if len(mat.shape) > 2:
        hom_vec = np.tile(hom_vec, [mat.shape[0], 1, 1])

    mat = np.concatenate((mat, hom_vec), axis=-2)
    return mat
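
# look_at() returns an OpenCV-style cam2world pose: the columns of the upper
# 3x4 block are the camera's right, up, and forward (towards the target) axes,
# followed by the camera position. cv_cam2world_to_bcam2world() below converts
# this into Blender's camera convention before it is assigned to the camera.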


def sample_spherical(n, radius=1.):
    xyz = np.random.normal(size=(n, 3))
    xyz = normalize(xyz) * radius
    return xyz


def set_camera_focal_length_in_world_units(camera_data, focal_length):
    scene = bpy.context.scene
    resolution_x_in_px = scene.render.resolution_x
    resolution_y_in_px = scene.render.resolution_y
    scale = scene.render.resolution_percentage / 100
    sensor_width_in_mm = camera_data.sensor_width
    sensor_height_in_mm = camera_data.sensor_height
    pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
    if (camera_data.sensor_fit == 'VERTICAL'):
        # the sensor height is fixed (sensor fit is vertical),
        # the sensor width is effectively changed with the pixel aspect ratio
        s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
        s_v = resolution_y_in_px * scale / sensor_height_in_mm
    else:  # 'HORIZONTAL' and 'AUTO'
        # the sensor width is fixed (sensor fit is horizontal),
        # the sensor height is effectively changed with the pixel aspect ratio
        pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
        s_u = resolution_x_in_px * scale / sensor_width_in_mm
        s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm

    camera_data.lens = focal_length / s_u


def cv_cam2world_to_bcam2world(cv_cam2world):
    '''

    :cv_cam2world: numpy array.
    :return:
    '''
    R_bcam2cv = Matrix(
        ((1, 0, 0),
         (0, -1, 0),
         (0, 0, -1)))

    cam_location = Vector(cv_cam2world[:3, -1].tolist())
    cv_cam2world_rot = Matrix(cv_cam2world[:3, :3].tolist())

    cv_world2cam_rot = cv_cam2world_rot.transposed()
    cv_translation = -1. * cv_world2cam_rot * cam_location

    blender_world2cam_rot = R_bcam2cv * cv_world2cam_rot
    blender_translation = R_bcam2cv * cv_translation

    blender_cam2world_rot = blender_world2cam_rot.transposed()
    blender_cam_location = -1. * blender_cam2world_rot * blender_translation

    blender_matrix_world = Matrix((
        blender_cam2world_rot[0][:] + (blender_cam_location[0],),
        blender_cam2world_rot[1][:] + (blender_cam_location[1],),
        blender_cam2world_rot[2][:] + (blender_cam_location[2],),
        (0, 0, 0, 1)
    ))

    return blender_matrix_world


def get_world2cam_from_blender_cam(cam):
    # bcam stands for blender camera
    R_bcam2cv = Matrix(
        ((1, 0, 0),
         (0, -1, 0),
         (0, 0, -1)))

    # Transpose since the rotation is object rotation,
    # and we want coordinate rotation
    # Use matrix_world instead to account for all constraints
    location, rotation = cam.matrix_world.decompose()[0:2]  # Matrix_world returns the cam2world matrix.
    R_world2bcam = rotation.to_matrix().transposed()

    # Convert camera location to translation vector used in coordinate changes
    # T_world2bcam = -1*R_world2bcam*cam.location
    # Use location from matrix_world to account for constraints:
    T_world2bcam = -1 * R_world2bcam * location

    # Build the coordinate transform matrix from world to computer vision camera
    R_world2cv = R_bcam2cv * R_world2bcam
    T_world2cv = R_bcam2cv * T_world2bcam

    # put into 3x4 matrix
    RT = Matrix((
        R_world2cv[0][:] + (T_world2cv[0],),
        R_world2cv[1][:] + (T_world2cv[1],),
        R_world2cv[2][:] + (T_world2cv[2],),
        (0, 0, 0, 1)
    ))
    return RT


def get_calibration_matrix_K_from_blender(camd):
    f_in_mm = camd.lens
    scene = bpy.context.scene
    resolution_x_in_px = scene.render.resolution_x
    resolution_y_in_px = scene.render.resolution_y
    scale = scene.render.resolution_percentage / 100
    sensor_width_in_mm = camd.sensor_width
    sensor_height_in_mm = camd.sensor_height
    pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
    if (camd.sensor_fit == 'VERTICAL'):
        # the sensor height is fixed (sensor fit is vertical),
        # the sensor width is effectively changed with the pixel aspect ratio
        s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
        s_v = resolution_y_in_px * scale / sensor_height_in_mm
    else:  # 'HORIZONTAL' and 'AUTO'
        # the sensor width is fixed (sensor fit is horizontal),
        # the sensor height is effectively changed with the pixel aspect ratio
        pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
        s_u = resolution_x_in_px * scale / sensor_width_in_mm
        s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm

    # Parameters of intrinsic calibration matrix K
    alpha_u = f_in_mm * s_u
    alpha_v = f_in_mm * s_v
    u_0 = resolution_x_in_px * scale / 2
    v_0 = resolution_y_in_px * scale / 2
    skew = 0  # only use rectangular pixels

    K = Matrix(
        ((alpha_u, skew, u_0),
         (0, alpha_v, v_0),
         (0, 0, 1)))
    return K


def uniform_sample(r, n, seed):
    np.random.seed(seed)
    translations = []
    u, v = np.random.rand(2, n)
    theta = 2 * np.pi * u
    phi = np.arccos(2 * v - 1)
    x = r * np.sin(phi) * np.cos(theta)
    y = r * np.sin(phi) * np.sin(theta)
    z = r * np.cos(phi)

    for i in range(n):
        if y[i] > 0:
            translations.append([x[i], y[i], z[i]])

    return np.array(translations)
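
# Note on uniform_sample(): for an area-uniform distribution on the sphere the
# azimuth theta is uniform in [0, 2*pi) while cos(phi) must be uniform in
# [-1, 1]; drawing phi = arccos(2*v - 1) with v ~ U(0, 1) achieves exactly that
# (sampling phi uniformly would over-sample the poles). Only the y > 0 half is
# kept, so roughly n/2 points are returned.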


def Fi_po(sphere_radius, num_steps):
    # Fibonacci sphere sampling
    N = 2 * num_steps
    phi = (np.sqrt(5) - 1) / 2
    translations = []

    for i in range(num_steps):
        y = -((2 * i + 1) / N - 1)
        x = (np.sqrt(1 - y ** 2)) * np.cos(2 * np.pi * (i + 1) * phi)
        z = (np.sqrt(1 - y ** 2)) * np.sin(2 * np.pi * (i + 1) * phi)

        translations.append((x, y, z))

    return sphere_radius * np.array(translations)


def cond_mkdir(path):
    path = os.path.normpath(path)
    if not os.path.exists(path):
        os.makedirs(path)

    return path


def dump(obj):
    for attr in dir(obj):
        if hasattr(obj, attr):
            print("obj.%s = %s" % (attr, getattr(obj, attr)))


def get_archimedean_spiral(sphere_radius, num_steps=250):
    '''
    https://en.wikipedia.org/wiki/Spiral, section "Spherical spiral". c = a / pi
    '''
    a = 40
    r = sphere_radius

    translations = []

    i = a / 2
    while i < a:
        theta = i / a * math.pi
        x = r * math.sin(theta) * math.cos(-i)
        z = r * math.sin(-theta + math.pi) * math.sin(-i)
        y = r * - math.cos(theta)

        translations.append((x, y, z))
        i += a / (2 * num_steps)

    return np.array(translations)


class BlenderInterface():
    def __init__(self, resolution=128, background_color=(0, 0, 0)):
        self.resolution = resolution

        # Delete the default cube (default selected)
        bpy.ops.object.delete()

        # Deselect all. All new objects added to the scene will automatically be selected.
        self.blender_renderer = bpy.context.scene.render
        self.blender_renderer.use_antialiasing = False
        # Set the rendered image resolution
        self.blender_renderer.resolution_x = resolution
        self.blender_renderer.resolution_y = resolution
        self.blender_renderer.resolution_percentage = 100
        self.blender_renderer.image_settings.file_format = 'PNG'  # set output format to .png

        self.blender_renderer.alpha_mode = 'TRANSPARENT'

        world = bpy.context.scene.world
        world.horizon_color = background_color
        world.light_settings.use_environment_light = True
        world.light_settings.environment_color = 'SKY_COLOR'
        world.light_settings.environment_energy = 1.

        lamp1 = bpy.data.lamps['Lamp']
        lamp1.type = 'SUN'
        lamp1.shadow_method = 'NOSHADOW'
        lamp1.use_specular = False
        lamp1.energy = 1.

        bpy.ops.object.lamp_add(type='SUN')
        lamp2 = bpy.data.lamps['Sun']
        lamp2.shadow_method = 'NOSHADOW'
        lamp2.use_specular = False
        lamp2.energy = 1.
        bpy.data.objects['Sun'].rotation_euler = bpy.data.objects['Lamp'].rotation_euler
        bpy.data.objects['Sun'].rotation_euler[0] += math.radians(180)  # rotation_euler is in radians

        bpy.ops.object.lamp_add(type='SUN')
        lamp2 = bpy.data.lamps['Sun.001']
        lamp2.shadow_method = 'NOSHADOW'
        lamp2.use_specular = False
        lamp2.energy = 0.3
        bpy.data.objects['Sun.001'].rotation_euler = bpy.data.objects['Lamp'].rotation_euler
        bpy.data.objects['Sun.001'].rotation_euler[0] += math.radians(90)  # rotation_euler is in radians

        # Set up the camera
        self.camera = bpy.context.scene.camera
        self.camera.data.sensor_height = self.camera.data.sensor_width  # Square sensor
        set_camera_focal_length_in_world_units(self.camera.data,
                                               525./512*resolution)  # Set focal length to a common value (kinect)

        bpy.ops.object.select_all(action='DESELECT')

    def import_mesh(self, fpath, scale=1., object_world_matrix=None):
        ext = os.path.splitext(fpath)[-1]
        if ext == '.obj':
            bpy.ops.import_scene.obj(filepath=str(fpath), split_mode='OFF')
        elif ext == '.ply':
            bpy.ops.import_mesh.ply(filepath=str(fpath))

        obj = bpy.context.selected_objects[0]
        dump(bpy.context.selected_objects)

        if object_world_matrix is not None:
            obj.matrix_world = object_world_matrix

        bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
        obj.location = (0., 0., 0.)  # center the bounding box!

        if scale != 1.:
            bpy.ops.transform.resize(value=(scale, scale, scale))

        # Disable transparency & specularities
        M = bpy.data.materials
        for i in range(len(M)):
            M[i].use_transparency = False
            M[i].specular_intensity = 0.0

        # Disable texture interpolation
        T = bpy.data.textures
        for i in range(len(T)):
            try:
                T[i].use_interpolation = False
                T[i].use_mipmap = False
                T[i].use_filter_size_min = True
                T[i].filter_type = "BOX"
            except:
                continue

    def render(self, output_dir, blender_cam2world_matrices, write_cam_params=False):

        if write_cam_params:
            img_dir = os.path.join(output_dir, 'rgb')
            pose_dir = os.path.join(output_dir, 'pose')

            cond_mkdir(img_dir)
            cond_mkdir(pose_dir)
        else:
            img_dir = output_dir
            cond_mkdir(img_dir)

        if write_cam_params:
            K = get_calibration_matrix_K_from_blender(self.camera.data)
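            # intrinsics.txt layout: line 1 = fx cx cy 0., line 2 = 0. 0. 0.,
            # line 3 = 1., line 4 = image height and width. With the focal
            # length chosen in __init__ (525/512 * resolution) and resolution=400,
            # fx = fy ~= 410.2 px and cx = cy = 200.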
            with open(os.path.join(output_dir, 'intrinsics.txt'), 'w') as intrinsics_file:
                intrinsics_file.write('%f %f %f 0.\n' % (K[0][0], K[0][2], K[1][2]))
                intrinsics_file.write('0. 0. 0.\n')
                intrinsics_file.write('1.\n')
                intrinsics_file.write('%d %d\n' % (self.resolution, self.resolution))

        for i in range(len(blender_cam2world_matrices)):
            self.camera.matrix_world = blender_cam2world_matrices[i]

            # Render the object
            if os.path.exists(os.path.join(img_dir, '%06d.png' % i)):
                continue

            # Render the color image
            self.blender_renderer.filepath = os.path.join(img_dir, '%06d.png' % i)

            bpy.ops.render.render(write_still=True)

            if write_cam_params:
                # Write out camera pose
                # RT = get_world2cam_from_blender_cam(self.camera)
                cam2world = blender_cam2world_matrices[i]
                with open(os.path.join(pose_dir, '%06d.txt' % i), 'w') as pose_file:
                    matrix_flat = []
                    for j in range(4):
                        for k in range(4):
                            matrix_flat.append(cam2world[j][k])
                    pose_file.write(' '.join(map(str, matrix_flat)) + '\n')

        # Remember which meshes were just imported
        meshes_to_remove = []
        for ob in bpy.context.selected_objects:
            meshes_to_remove.append(ob.data)

        bpy.ops.object.delete()

        # Remove the meshes from memory too
        for mesh in meshes_to_remove:
            bpy.data.meshes.remove(mesh)


"""
这里是获得rgb和pose以及内参
为了使depth模式渲染的姿态与rgb一致,我们固定了每次渲染的随机参数,test、train、val的随机种子分别为8,6,7
当然随机种子可以修改,只是depth模式与rgb模式的一致
并且,这里随机的观察视角是在一个球面上,如果我们只需要一半 则真实的观察视角个数应当 等于 num_observations/2
当然,随机种子不一定刚好在正半球和负半球是相等的个数,可能存在几个微小的差异,但这没关系,不影响收集训练集、测试集和验证集
如果非要严格按照标准数据个数收集,我们可以测试一组正半球大于或等于负半球的观察数,多则删除
"""
mode = 'test'
num_observations = 200
sphere_radius = 4
# mesh_fpath is the path to the 3D model to render
mesh_fpath = '/home/liuyuxing/render_data/car/car_red_00/model_4_red/untitled.obj'
# directory where the rendered outputs are written
instance_dir = '/home/liuyuxing/render_data/car/car_red_00/model_4_red/train'

renderer = BlenderInterface(resolution=400)

# train_seed = 6, val_seed = 7, test_seed = 8
# uniform_sample() samples viewpoints uniformly on a sphere;
# swap in get_archimedean_spiral() or Fi_po() above if a different sampling pattern is needed
if mode == 'train':
    cam_locations = uniform_sample(sphere_radius, num_observations, 6)
elif mode == 'test':
    cam_locations = uniform_sample(sphere_radius, num_observations, 8)
elif mode == 'val':
    cam_locations = uniform_sample(sphere_radius, num_observations, 7)

obj_location = np.zeros((1, 3))

cv_poses = look_at(cam_locations, obj_location)

blender_poses = [cv_cam2world_to_bcam2world(m) for m in cv_poses]

shapenet_rotation_mat = np.array([[1.0000000e+00, 0.0000000e+00, 0.0000000e+00],
                                  [0.0000000e+00, -1.0000000e+00, -1.2246468e-16],
                                  [0.0000000e+00, 1.2246468e-16, -1.0000000e+00]])
rot_mat = np.eye(3)
hom_coords = np.array([[0., 0., 0., 1.]]).reshape(1, 4)
obj_pose = np.concatenate((rot_mat, obj_location.reshape(3, 1)), axis=-1)
obj_pose = np.concatenate((obj_pose, hom_coords), axis=0)

renderer.import_mesh(mesh_fpath, scale=1., object_world_matrix=obj_pose)
renderer.render(instance_dir, blender_poses, write_cam_params=True)
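
To sanity-check the outputs, the saved pose and intrinsics files can be read back with NumPy outside Blender. A minimal sketch (paths are relative to instance_dir; the convention flip simply mirrors cv_cam2world_to_bcam2world()):

import numpy as np

# One pose file holds a flattened 4x4 cam2world matrix in Blender's camera convention.
cam2world = np.loadtxt('pose/000000.txt').reshape(4, 4)

# The first line of intrinsics.txt is "fx cx cy 0." (fx == fy here, square pixels).
with open('intrinsics.txt') as f:
    fx, cx, cy, _ = map(float, f.readline().split())
K = np.array([[fx, 0., cx],
              [0., fx, cy],
              [0., 0., 1.]])

# Blender cameras look down -Z with +Y up; flip the second and third rotation
# columns to get an OpenCV-style pose (camera looks down +Z, +Y down).
cv_cam2world = cam2world.copy()
cv_cam2world[:3, 1:3] *= -1

# Project the object centre (the world origin) into the image. Because every
# camera looks at the origin, the result should land near (cx, cy).
world2cam = np.linalg.inv(cv_cam2world)
p_cam = world2cam[:3, 3]
uv = K @ p_cam
print(uv[:2] / uv[2])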


2 Render and save depth maps

The depth remapping is configured inside render() through a Map Value compositor node:

map.offset[0] = -g_depth_clip_start
map.size[0] = 1 / (g_depth_clip_end - g_depth_clip_start)

  • The saved depth map stores 0-255 pixel values spanning the near and far clip distances g_depth_clip_start ~ g_depth_clip_end.
  • Each depth pixel is the true camera-to-object distance (note: this is not the z-coordinate).

A sketch for converting the saved depth images back to metric distance is given after the script below.

import bpy
from mathutils import Matrix, Vector
import os
import numpy as np
import math


def normalize(vec):
    return vec / (np.linalg.norm(vec, axis=-1, keepdims=True) + 1e-9)


def look_at(cam_location, point):
    # Cam points in positive z direction
    forward = point - cam_location
    forward = normalize(forward)

    tmp = np.array([0., -1., 0.])

    right = np.cross(tmp, forward)
    right = normalize(right)

    up = np.cross(forward, right)
    up = normalize(up)

    mat = np.stack((right, up, forward, cam_location), axis=-1)

    hom_vec = np.array([[0., 0., 0., 1.]])

    if len(mat.shape) > 2:
        hom_vec = np.tile(hom_vec, [mat.shape[0], 1, 1])

    mat = np.concatenate((mat, hom_vec), axis=-2)
    return mat


def set_camera_focal_length_in_world_units(camera_data, focal_length):
    scene = bpy.context.scene
    resolution_x_in_px = scene.render.resolution_x
    resolution_y_in_px = scene.render.resolution_y
    scale = scene.render.resolution_percentage / 100
    sensor_width_in_mm = camera_data.sensor_width
    sensor_height_in_mm = camera_data.sensor_height
    pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
    if (camera_data.sensor_fit == 'VERTICAL'):
        # the sensor height is fixed (sensor fit is vertical),
        # the sensor width is effectively changed with the pixel aspect ratio
        s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
        s_v = resolution_y_in_px * scale / sensor_height_in_mm
    else:  # 'HORIZONTAL' and 'AUTO'
        # the sensor width is fixed (sensor fit is horizontal),
        # the sensor height is effectively changed with the pixel aspect ratio
        pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
        s_u = resolution_x_in_px * scale / sensor_width_in_mm
        s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm

    camera_data.lens = focal_length / s_u


def cv_cam2world_to_bcam2world(cv_cam2world):
    '''

    :cv_cam2world: numpy array.
    :return:
    '''
    R_bcam2cv = Matrix(
        ((1, 0, 0),
         (0, -1, 0),
         (0, 0, -1)))

    cam_location = Vector(cv_cam2world[:3, -1].tolist())
    cv_cam2world_rot = Matrix(cv_cam2world[:3, :3].tolist())

    cv_world2cam_rot = cv_cam2world_rot.transposed()
    cv_translation = -1. * cv_world2cam_rot * cam_location

    blender_world2cam_rot = R_bcam2cv * cv_world2cam_rot
    blender_translation = R_bcam2cv * cv_translation

    blender_cam2world_rot = blender_world2cam_rot.transposed()
    blender_cam_location = -1. * blender_cam2world_rot * blender_translation

    blender_matrix_world = Matrix((
        blender_cam2world_rot[0][:] + (blender_cam_location[0],),
        blender_cam2world_rot[1][:] + (blender_cam_location[1],),
        blender_cam2world_rot[2][:] + (blender_cam_location[2],),
        (0, 0, 0, 1)
    ))

    return blender_matrix_world


def get_world2cam_from_blender_cam(cam):
    # bcam stands for blender camera
    R_bcam2cv = Matrix(
        ((1, 0, 0),
         (0, -1, 0),
         (0, 0, -1)))

    # Transpose since the rotation is object rotation,
    # and we want coordinate rotation
    # Use matrix_world instead to account for all constraints
    location, rotation = cam.matrix_world.decompose()[0:2]  # Matrix_world returns the cam2world matrix.
    R_world2bcam = rotation.to_matrix().transposed()

    # Convert camera location to translation vector used in coordinate changes
    # T_world2bcam = -1*R_world2bcam*cam.location
    # Use location from matrix_world to account for constraints:
    T_world2bcam = -1 * R_world2bcam * location

    # Build the coordinate transform matrix from world to computer vision camera
    R_world2cv = R_bcam2cv * R_world2bcam
    T_world2cv = R_bcam2cv * T_world2bcam

    # put into 3x4 matrix
    RT = Matrix((
        R_world2cv[0][:] + (T_world2cv[0],),
        R_world2cv[1][:] + (T_world2cv[1],),
        R_world2cv[2][:] + (T_world2cv[2],),
        (0, 0, 0, 1)
    ))
    return RT


def get_calibration_matrix_K_from_blender(camd):
    f_in_mm = camd.lens
    scene = bpy.context.scene
    resolution_x_in_px = scene.render.resolution_x
    resolution_y_in_px = scene.render.resolution_y
    scale = scene.render.resolution_percentage / 100
    sensor_width_in_mm = camd.sensor_width
    sensor_height_in_mm = camd.sensor_height
    pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
    if (camd.sensor_fit == 'VERTICAL'):
        # the sensor height is fixed (sensor fit is vertical),
        # the sensor width is effectively changed with the pixel aspect ratio
        s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
        s_v = resolution_y_in_px * scale / sensor_height_in_mm
    else:  # 'HORIZONTAL' and 'AUTO'
        # the sensor width is fixed (sensor fit is horizontal),
        # the sensor height is effectively changed with the pixel aspect ratio
        pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
        s_u = resolution_x_in_px * scale / sensor_width_in_mm
        s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm

    # Parameters of intrinsic calibration matrix K
    alpha_u = f_in_mm * s_u
    alpha_v = f_in_mm * s_v
    u_0 = resolution_x_in_px * scale / 2
    v_0 = resolution_y_in_px * scale / 2
    skew = 0  # only use rectangular pixels

    K = Matrix(
        ((alpha_u, skew, u_0),
         (0, alpha_v, v_0),
         (0, 0, 1)))
    return K


def uniform_sample(r, n, seed):
    np.random.seed(seed)
    translations = []
    u, v = np.random.rand(2, n)
    theta = 2 * np.pi * u
    phi = np.arccos(2 * v - 1)
    x = r * np.sin(phi) * np.cos(theta)
    y = r * np.sin(phi) * np.sin(theta)
    z = r * np.cos(phi)

    for i in range(n):
        if y[i] > 0:
            translations.append([x[i], y[i], z[i]])

    return np.array(translations)


def Fi_po(sphere_radius, num_steps):
    N = 2 * num_steps
    phi = (np.sqrt(5) - 1) / 2
    translations = []

    for i in range(num_steps):
        y = -((2 * i + 1) / N - 1)
        x = (np.sqrt(1 - y ** 2)) * np.cos(2 * np.pi * (i + 1) * phi)
        z = (np.sqrt(1 - y ** 2)) * np.sin(2 * np.pi * (i + 1) * phi)

        translations.append((x, y, z))

    return sphere_radius * np.array(translations)


def cond_mkdir(path):
    path = os.path.normpath(path)
    if not os.path.exists(path):
        os.makedirs(path)

    return path


def dump(obj):
    for attr in dir(obj):
        if hasattr(obj, attr):
            print("obj.%s = %s" % (attr, getattr(obj, attr)))


def get_archimedean_spiral(sphere_radius, num_steps=250):
    '''
    https://en.wikipedia.org/wiki/Spiral, section "Spherical spiral". c = a / pi
    '''
    a = 40
    r = sphere_radius

    translations = []

    i = a / 2
    while i < a:
        theta = i / a * math.pi
        x = r * math.sin(theta) * math.cos(-i)
        z = r * math.sin(-theta + math.pi) * math.sin(-i)
        y = r * - math.cos(theta)

        translations.append((x, y, z))
        i += a / (2 * num_steps)

    return np.array(translations)


class BlenderInterface():
    def __init__(self, resolution=128, background_color=(0, 0, 0)):
        self.resolution = resolution

        # Delete the default cube (default selected)
        bpy.ops.object.delete()

        # Deselect all. All new objects added to the scene will automatically be selected.
        self.blender_renderer = bpy.context.scene.render
        self.blender_renderer.use_antialiasing = False
        self.blender_renderer.resolution_x = resolution
        self.blender_renderer.resolution_y = resolution
        self.blender_renderer.resolution_percentage = 100
        self.blender_renderer.image_settings.file_format = 'PNG'  # set output format to .png

        # Add passes for additionally dumping albedo and normals.
        self.blender_renderer.layers["RenderLayer"].use_pass_normal = True
        self.blender_renderer.layers["RenderLayer"].use_pass_color = True

        self.blender_renderer.alpha_mode = 'TRANSPARENT'

        bpy.context.scene.use_nodes = True

        world = bpy.context.scene.world
        world.horizon_color = background_color
        world.light_settings.use_environment_light = True
        world.light_settings.environment_color = 'SKY_COLOR'
        world.light_settings.environment_energy = 1.

        lamp1 = bpy.data.lamps['Lamp']
        lamp1.type = 'SUN'
        lamp1.shadow_method = 'NOSHADOW'
        lamp1.use_specular = False
        lamp1.energy = 1.

        bpy.ops.object.lamp_add(type='SUN')
        lamp2 = bpy.data.lamps['Sun']
        lamp2.shadow_method = 'NOSHADOW'
        lamp2.use_specular = False
        lamp2.energy = 1.
        bpy.data.objects['Sun'].rotation_euler = bpy.data.objects['Lamp'].rotation_euler
        bpy.data.objects['Sun'].rotation_euler[0] += math.radians(180)  # rotation_euler is in radians

        bpy.ops.object.lamp_add(type='SUN')
        lamp2 = bpy.data.lamps['Sun.001']
        lamp2.shadow_method = 'NOSHADOW'
        lamp2.use_specular = False
        lamp2.energy = 1
        bpy.data.objects['Sun.001'].rotation_euler = bpy.data.objects['Lamp'].rotation_euler
        bpy.data.objects['Sun.001'].rotation_euler[0] += math.radians(90)  # rotation_euler is in radians

        # bpy.data.cameras['Camera'].clip_start = 0.5
        # bpy.data.cameras['Camera'].clip_end = 6

        # Set up the camera
        self.camera = bpy.context.scene.camera
        cam_constraint = self.camera.constraints.new(type='TRACK_TO')
        cam_constraint.track_axis = 'TRACK_NEGATIVE_Z'
        cam_constraint.up_axis = 'UP_Y'

        self.camera.data.sensor_height = self.camera.data.sensor_width  # Square sensor
        set_camera_focal_length_in_world_units(self.camera.data,
                                               525./512*resolution)  # Set focal length to a common value (kinect)

        bpy.ops.object.select_all(action='DESELECT')

    def import_mesh(self, fpath, scale=1., object_world_matrix=None):
        ext = os.path.splitext(fpath)[-1]
        if ext == '.obj':
            bpy.ops.import_scene.obj(filepath=str(fpath), split_mode='OFF')
        elif ext == '.ply':
            bpy.ops.import_mesh.ply(filepath=str(fpath))

        obj = bpy.context.selected_objects[0]
        dump(bpy.context.selected_objects)

        if object_world_matrix is not None:
            obj.matrix_world = object_world_matrix

        bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
        obj.location = (0., 0., 0.)  # center the bounding box!

        if scale != 1.:
            bpy.ops.transform.resize(value=(scale, scale, scale))

        # Disable transparency & specularities
        M = bpy.data.materials
        for i in range(len(M)):
            M[i].use_transparency = False
            M[i].specular_intensity = 0.0

        # Disable texture interpolation
        T = bpy.data.textures
        for i in range(len(T)):
            try:
                T[i].use_interpolation = False
                T[i].use_mipmap = False
                T[i].use_filter_size_min = True
                T[i].filter_type = "BOX"
            except:
                continue

    def render(self, output_dir, blender_cam2world_matrices, write_cam_params=False):

        # Set up rendering of depth map.
        bpy.context.scene.use_nodes = True
        tree = bpy.context.scene.node_tree
        links = tree.links

        # Clear default nodes
        for n in tree.nodes:
            tree.nodes.remove(n)

        # Create input render layer node.
        render_layers = tree.nodes.new('CompositorNodeRLayers')

        depth_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
        depth_file_output.label = 'Depth Output'

        # Remap as other types can not represent the full range of depth.
        map = tree.nodes.new(type="CompositorNodeMapValue")
        # Size is chosen kind of arbitrarily, try out until you're satisfied with resulting depth map.
        # map.offset[0] = -g_depth_clip_start
        # map.size[0] = 1 / (g_depth_clip_end - g_depth_clip_start)
        # The saved depth map stores 0-255 values spanning the near/far clip
        # distances g_depth_clip_start ~ g_depth_clip_end.
        # Each depth pixel is the true camera-to-object distance (not the z-coordinate).
        # map.use_min = True
        # map.use_max = True
        # map.min[0] = 0.0
        # map.max[0] = 1.0
        # depth range: 0.7 to 10.7
        map.offset = [-0.7]
        map.size = [0.1]
        map.use_min = True
        map.min = [0]
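        # With these values the Map Value node computes (depth + offset) * size
        # = (depth - 0.7) * 0.1, so a distance of 0.7 maps to 0 and 10.7 maps
        # to 1; use_min clamps anything nearer than 0.7 to 0.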
        links.new(render_layers.outputs['Depth'], map.inputs[0])

        links.new(map.outputs[0], depth_file_output.inputs[0])

        scale_normal = tree.nodes.new(type="CompositorNodeMixRGB")
        scale_normal.blend_type = 'MULTIPLY'
        scale_normal.use_alpha = True
        scale_normal.inputs[2].default_value = (0.5, 0.5, 0.5, 1)
        links.new(render_layers.outputs['Normal'], scale_normal.inputs[1])

        bias_normal = tree.nodes.new(type="CompositorNodeMixRGB")
        bias_normal.blend_type = 'ADD'
        bias_normal.use_alpha = True
        bias_normal.inputs[2].default_value = (0.5, 0.5, 0.5, 0)
        links.new(scale_normal.outputs[0], bias_normal.inputs[1])

        normal_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
        normal_file_output.label = 'Normal Output'
        links.new(bias_normal.outputs[0], normal_file_output.inputs[0])

        albedo_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
        albedo_file_output.label = 'Albedo Output'
        links.new(render_layers.outputs['Color'], albedo_file_output.inputs[0])

        for output_node in [depth_file_output, normal_file_output, albedo_file_output]:
            output_node.base_path = ''

        if write_cam_params:
            img_dir = os.path.join(output_dir, 'rgb')
            pose_dir = os.path.join(output_dir, 'pose')

            cond_mkdir(img_dir)
            cond_mkdir(pose_dir)
        else:
            img_dir = output_dir
            cond_mkdir(img_dir)

        # if write_cam_params:
        #     K = get_calibration_matrix_K_from_blender(self.camera.data)
        #     with open(os.path.join(output_dir, 'intrinsics.txt'), 'w') as intrinsics_file:
        #         intrinsics_file.write('%f %f %f 0.\n' % (K[0][0], K[0][2], K[1][2]))
        #         intrinsics_file.write('0. 0. 0.\n')
        #         intrinsics_file.write('1.\n')
        #         intrinsics_file.write('%d %d\n' % (self.resolution, self.resolution))

        for i in range(len(blender_cam2world_matrices)):
            self.camera.matrix_world = blender_cam2world_matrices[i]

            # Render the object
            if os.path.exists(os.path.join(img_dir, '%06d.png' % i)):
                continue

            # Render the color image
            self.blender_renderer.filepath = os.path.join(img_dir, '%06d' % i)
            depth_file_output.file_slots[0].path = self.blender_renderer.filepath + "_depth.png"
            normal_file_output.file_slots[0].path = self.blender_renderer.filepath + "_normal.png"
            albedo_file_output.file_slots[0].path = self.blender_renderer.filepath + "_albedo.png"
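            # Note: File Output nodes append the current frame number to their
            # slot path, so the depth/normal/albedo files typically come out as
            # e.g. '000000_depth.png0001.png' and may need renaming afterwards
            # (the script in section 3 can be adapted for that).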

            bpy.ops.render.render(write_still=True)

            # if write_cam_params:
            #     # Write out camera pose
            #     # RT = get_world2cam_from_blender_cam(self.camera)
            #     cam2world = blender_cam2world_matrices[i]
            #     with open(os.path.join(pose_dir, '%06d.txt' % i), 'w') as pose_file:
            #         matrix_flat = []
            #         for j in range(4):
            #             for k in range(4):
            #                 matrix_flat.append(cam2world[j][k])
            #         pose_file.write(' '.join(map(str, matrix_flat)) + '\n')

        # Remember which meshes were just imported
        meshes_to_remove = []
        for ob in bpy.context.selected_objects:
            meshes_to_remove.append(ob.data)

        bpy.ops.object.delete()

        # Remove the meshes from memory too
        for mesh in meshes_to_remove:
            bpy.data.meshes.remove(mesh)


"""
这里是渲染rgb的深度模式,这与渲染rgb、pose的代码一致,只不过是分开保存
为了使depth模式渲染的姿态与rgb一致,我们固定了每次渲染的随机参数,test、train、val的随机种子分别为8,6,7
当然随机种子可以修改,只是depth模式与rgb模式的一致
并且,这里随机的观察视角是在一个球面上,如果我们只需要一半 则真实的观察视角个数应当 等于 num_observations/2
当然,随机种子不一定刚好在正半球和负半球是相等的个数,可能存在几个微小的差异,但这没关系,不影响收集训练集、测试集和验证集
如果非要严格按照标准数据个数收集,我们可以测试一组正半球大于或等于负半球的观察数,多则删除
"""
mode = 'test'
num_observations = 200
sphere_radius = 4
mesh_fpath = '/home/liuyuxing/render_data/car/car_red_00/model_4_red/untitled.obj'
instance_dir = '/home/liuyuxing/render_data/car/car_red_00/model_4_red/depth'

renderer = BlenderInterface(resolution=400)

# train_seed = 6 val_seed = 7 test_seed = 8
if mode == 'train':
    cam_locations = uniform_sample(sphere_radius, num_observations, 6)
elif mode == 'test':
    cam_locations = uniform_sample(sphere_radius, num_observations, 8)
elif mode == 'val':
    cam_locations = uniform_sample(sphere_radius, num_observations, 7)

obj_location = np.zeros((1, 3))

cv_poses = look_at(cam_locations, obj_location)

blender_poses = [cv_cam2world_to_bcam2world(m) for m in cv_poses]


rot_mat = np.eye(3)
hom_coords = np.array([[0., 0., 0., 1.]]).reshape(1, 4)
obj_pose = np.concatenate((rot_mat, obj_location.reshape(3, 1)), axis=-1)
obj_pose = np.concatenate((obj_pose, hom_coords), axis=0)

renderer.import_mesh(mesh_fpath, scale=1., object_world_matrix=obj_pose)
renderer.render(instance_dir, blender_poses, write_cam_params=True)
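
To convert a saved depth image back to metric distance, invert the Map Value remapping above. A minimal sketch, assuming the 0.7-10.7 range configured in render() and an 8-bit PNG; the file name is a placeholder, so adjust it to the name the File Output node actually produces:

from PIL import Image
import numpy as np

g_depth_clip_start = 0.7   # matches map.offset = [-0.7]
g_depth_clip_end = 10.7    # matches map.size = [0.1] = 1 / (10.7 - 0.7)

img = np.array(Image.open('depth/000000_depth.png'))  # placeholder file name
if img.ndim == 3:          # keep a single channel if the PNG has RGB(A) channels
    img = img[..., 0]

# Undo the 0-255 quantisation and the (depth + offset) * size remapping.
normalized = img.astype(np.float32) / 255.0
depth = normalized * (g_depth_clip_end - g_depth_clip_start) + g_depth_clip_start
# Pixels at 255 are saturated: they correspond to anything at or beyond 10.7.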

3 Renumber the images sequentially

import os


class BatchRename():

    def __init__(self):
        self.path = '/home/liuyuxing/PycharmProjects/nerf-pytorch-master/data/car_red/train/'

    def rename(self):
        filelist = os.listdir(self.path)
        filelist.sort()
        total_num = len(filelist)
        i = 0
        for item in filelist:
            if item.endswith('.png'):  # only process files with the .png extension
                src = os.path.join(os.path.abspath(self.path), item)
                s = str(i)
                # s = s.zfill(6)
                dst = os.path.join(os.path.abspath(self.path), 'r_' + s + '.png')

                try:
                    os.rename(src, dst)
                    print('converting %s to %s ...' % (src, dst))
                    i = i + 1
                except:
                    continue
        print('scanned %d files, renamed %d pngs' % (total_num, i))


if __name__ == '__main__':
    demo = BatchRename()
    demo.rename()
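
If zero-padded names are preferred (the zfill call above is commented out), a dry-run variant might look like the following sketch; the directory and prefix are placeholders:

import os

path = '/path/to/images'  # placeholder directory
files = sorted(f for f in os.listdir(path) if f.endswith('.png'))
for i, name in enumerate(files):
    dst = 'r_%06d.png' % i  # zero-padded so lexicographic sorting stays numeric
    print('would rename %s -> %s' % (name, dst))
    # os.rename(os.path.join(path, name), os.path.join(path, dst))  # uncomment to apply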
