基础融合方式
在FFHQ项目中,生成不同面部的同时,stage2和stage3的模型在脖颈和后脑勺处的顶点也会产生细微偏差,为了对齐标准头部(或者平均头部),需要对两个头部模型进行融合
对于处于适配区中的输入头部和标准头部的坐标点,进行加权平均:挨着输入区的一侧,输入头部的坐标权重为1,标准头部坐标权重为0;同样地,挨着标准区的一侧,输入头部坐标权重为0,标准头部坐标权重为1。在选取适配区的时候,这里采用了z轴和y轴区间共同进行选取,以此实现简单的头部obj融合
import numpy as np
def read_obj(file_path):
    """Parse an OBJ file, keeping vertex coordinates plus the raw vt/f lines.

    Returns a tuple of (Nx3 vertex array, original 'v ' lines stripped of
    trailing whitespace, untouched 'vt '/'f ' lines). Other line types
    (normals, comments, mtl references) are discarded.
    """
    verts = []
    raw_vertex_lines = []
    passthrough = []
    with open(file_path, 'r') as fh:
        for raw in fh:
            if raw.startswith('v '):
                stripped = raw.strip()
                raw_vertex_lines.append(stripped)
                verts.append([float(tok) for tok in stripped.split()[1:]])
            elif raw.startswith(('vt ', 'f ')):
                passthrough.append(raw)
    return np.array(verts), raw_vertex_lines, passthrough
def write_obj(file_path, vertices, vertex_lines, other_lines):
    """Write an OBJ file: one vertex string per line, then the remaining lines.

    Note: `vertices` is accepted for interface symmetry with `read_obj` but is
    not used — the pre-formatted strings in `vertex_lines` are what gets
    written. `other_lines` entries are expected to already end with newlines.
    """
    with open(file_path, 'w') as fh:
        fh.writelines(v + '\n' for v in vertex_lines)
        fh.writelines(other_lines)
def adapt_blendshapes(standard_obj_path, input_obj_path, threshold1, threshold2, ax):
    """Blend the input head into the standard head inside a slab along one axis.

    Vertices whose coordinate on axis `ax` lies strictly between `threshold2`
    (lower bound, standard side) and `threshold1` (upper bound, input side)
    form the transition zone: they are linearly interpolated between the two
    meshes, with the input mesh dominating near `threshold1` and the standard
    mesh dominating near `threshold2`. Vertices below `threshold2` are replaced
    outright by the standard mesh; all other vertices keep the input shape.

    Parameters
    ----------
    standard_obj_path : str — path of the reference (mean) head OBJ
    input_obj_path : str — path of the generated head OBJ
    threshold1 : float — upper bound of the transition zone (input side)
    threshold2 : float — lower bound of the transition zone (standard side)
    ax : int — coordinate column (0, 1 or 2) used for the thresholds

    Returns (adapted_vertices, output_vertex_lines, input_other_lines).
    Assumes both OBJ files share the same vertex count and ordering —
    TODO confirm for meshes from other pipelines.
    """
    standard_vertices, _, _ = read_obj(standard_obj_path)
    input_vertices, input_vertex_lines, input_other_lines = read_obj(input_obj_path)
    coord = input_vertices[:, ax]
    # Transition zone: threshold2 < coord < threshold1. np.intersect1d replaces
    # the original list(set & set), which produced nondeterministic ordering;
    # it also returns sorted indices, keeping weights aligned with vertices.
    blend_idx = np.intersect1d(np.where(coord < threshold1)[0],
                               np.where(coord > threshold2)[0])
    standard_idx = np.where(coord < threshold2)[0]
    adapted_vertices = np.copy(input_vertices)
    if blend_idx.size:
        lo = np.min(coord[blend_idx])
        hi = np.max(coord[blend_idx])
        span = hi - lo
        if span > 0:
            # Weight 1 at the input-side edge, 0 at the standard-side edge.
            weights = (coord[blend_idx] - lo) / span
        else:
            # Degenerate zero-width zone (all points at one coordinate): the
            # original divided by zero here; fall back to an even 50/50 blend.
            weights = np.full(blend_idx.shape, 0.5)
        w = weights[:, np.newaxis]
        adapted_vertices[blend_idx] = ((1 - w) * standard_vertices[blend_idx]
                                       + w * input_vertices[blend_idx])
    # Beyond the zone on the standard side, copy the standard head verbatim.
    adapted_vertices[standard_idx] = standard_vertices[standard_idx]
    # Re-serialize every vertex as an OBJ 'v' line for write_obj.
    output_vertex_lines = ['v %.6f %.6f %.6f' % tuple(v[:3]) for v in adapted_vertices]
    return adapted_vertices, output_vertex_lines, input_other_lines
# --- Driver: two-pass threshold-based blending -------------------------------
# The OBJ models here come from the FFHQ pipeline; adjust the paths and the
# thresholds below when working with models from a different source.
standard_obj_path = 'hifi3dpp_mean_face.obj'  # reference (mean) head OBJ
input_obj_path = '8.obj'                      # generated head to align
output_obj_path = 'tmp.obj'                   # intermediate result of pass 1
output_obj_path2 = input_obj_path.replace('.obj', '_mix.obj')  # final output

# Pass 1: blend the neck and everything below it. Per the author, ax=1 selects
# along this model's z axis — confirm for other coordinate conventions.
threshold1 = -0.9
threshold2 = -1.2
adapted_vertices, input_vertex_lines, input_other_lines = adapt_blendshapes(
    standard_obj_path, input_obj_path, threshold1, threshold2, ax=1)
write_obj(output_obj_path, adapted_vertices, input_vertex_lines, input_other_lines)

# Pass 2: blend from the cheeks to the back of the head (ax=2 — the author's
# y axis), starting from the pass-1 result.
threshold1 = 0
threshold2 = -0.5
adapted_vertices, input_vertex_lines, input_other_lines = adapt_blendshapes(
    standard_obj_path, output_obj_path, threshold1, threshold2, ax=2)
write_obj(output_obj_path2, adapted_vertices, input_vertex_lines, input_other_lines)
这样就实现了简单的两个头部的融合,融合效果如下:
输入的头部obj(红色)和标准头部(绿色),融合头部的模型(蓝色)将输入头部的面部和标准头部的脖颈和后脑勺结合在一起。
单看一个可能不太明显,下面是对比,首先是输入头部和融合头部的面部是完全一致的
这个是标准头部和融合头部的对比,两者的后脑勺和脖颈部分的顶点是完全一致的
特殊融合方式
融合区域(绿色区域)可以指定选取,然后指定输入区边界(黄线)和标准区边界(红线),可以实现更加灵活的效果。
其实是手动选点来替代上面的np.where操作,下面是在blender中进行的,手动选点获取索引的代码如下(你需要手动修改一下输入的txt路径):
import bpy

# Export the index of every selected vertex (Blender edit-mode selection) to a
# plain text file, one index per line. Change the output path to suit your
# machine before running this inside Blender.
selected_objects = bpy.context.selected_editable_objects

with open('e:/test/t4.txt', 'w') as f:
    if not selected_objects:
        # Nothing selected at all — report and leave the file empty.
        print("没有选取任何对象")
    else:
        for obj in selected_objects:
            if obj.type != 'MESH':
                continue  # only mesh objects carry vertex data
            for vertex in obj.data.vertices:
                if vertex.select:
                    print("选取的顶点索引:", vertex.index)
                    f.write(f'{vertex.index}\n')
这样获取四个部分的区域
代码上稍微做了一些修改
import numpy as np
def read_obj(file_path):
    """Read an OBJ file into (Nx3 vertex array, raw 'v ' lines, vt/f lines).

    Only vertex, texture-coordinate and face lines are retained; anything
    else (normals, comments, mtl directives) is ignored.
    """
    coords, v_lines, rest = [], [], []
    with open(file_path, 'r') as src:
        for text in src:
            if text[:2] == 'v ':
                clean = text.strip()
                v_lines.append(clean)
                coords.append(list(map(float, clean.split()[1:])))
            elif text[:3] == 'vt ' or text[:2] == 'f ':
                rest.append(text)
    return np.array(coords), v_lines, rest
def write_obj(file_path, vertices, vertex_lines, other_lines):
    """Serialize a mesh: vertex strings first, then the untouched vt/f lines.

    `vertices` is unused (kept for interface symmetry with read_obj); the
    pre-formatted `vertex_lines` strings are written, each followed by a
    newline, and `other_lines` are appended verbatim.
    """
    payload = ''.join(f'{vl}\n' for vl in vertex_lines) + ''.join(other_lines)
    with open(file_path, 'w') as out:
        out.write(payload)
def read_indices(file_path):
    """Load vertex indices (one integer per line) from a Blender-exported file."""
    with open(file_path, 'r') as src:
        parsed = [int(row.strip()) for row in src.readlines()]
    return np.array(parsed)
def get_distance(target_point, point_set):
    """Return the smallest mean-squared distance from `target_point` to any
    point in `point_set`.

    Fixes the original implementation, which initialized its running minimum
    to the magic constant 99 and therefore silently clipped any true distance
    above 99 (and returned 99 for an empty `point_set`). This version has no
    cap; an empty `point_set` now raises instead of yielding a bogus distance.

    Parameters
    ----------
    target_point : (3,) array-like vertex position
    point_set : (N, 3) array of candidate positions, N >= 1
    """
    # Vectorized: mean over coordinates of the squared differences, then the
    # minimum over candidate points — one NumPy pass instead of a Python loop.
    diffs = np.asarray(point_set) - np.asarray(target_point)
    return float(np.min(np.mean(diffs ** 2, axis=1)))
def cal_weights(adapted_vertices, input_boundary_points, standard_boundary_points):
    """Per-vertex blend weight: ~1 near the input boundary, ~0 near the standard one.

    Each weight is dis_standard / (dis_input + dis_standard), so a vertex close
    to the standard-region boundary gets a weight near 0 (take the standard
    mesh) while a vertex close to the input-region boundary gets a weight near
    1 (keep the input mesh).
    """
    def weight_of(vertex):
        to_input = get_distance(vertex, input_boundary_points)
        to_standard = get_distance(vertex, standard_boundary_points)
        return to_standard / (to_input + to_standard)

    return np.array([weight_of(v) for v in adapted_vertices])
def adapt_blendshapes(standard_obj_path, input_obj_path,
                      adaptation_txt='./适配区.txt',
                      standard_txt='./标准区.txt',
                      input_boundary_txt='./输入区边界.txt',
                      standard_boundary_txt='./标准区边界.txt'):
    """Blend a generated head into the standard head over hand-picked regions.

    The four index files (exported from Blender, one vertex index per line)
    define: the transition region whose vertices are interpolated, the region
    copied verbatim from the standard head, and the two boundary vertex loops
    used to compute per-vertex interpolation weights (weight ~1 at the input
    boundary, ~0 at the standard boundary).

    Parameters
    ----------
    standard_obj_path : str — path of the reference (mean) head OBJ
    input_obj_path : str — path of the generated head OBJ
    adaptation_txt, standard_txt, input_boundary_txt, standard_boundary_txt :
        str — index-file paths; previously hard-coded, now keyword parameters
        with the original values as backward-compatible defaults.

    Returns (adapted_vertices, output_vertex_lines, input_other_lines).
    Assumes both OBJ files share the same vertex count and ordering —
    TODO confirm for meshes from other pipelines.
    """
    standard_vertices, _, _ = read_obj(standard_obj_path)
    input_vertices, input_vertex_lines, input_other_lines = read_obj(input_obj_path)
    adaptation_indices = read_indices(adaptation_txt)
    standard_indices = read_indices(standard_txt)
    input_boundary_indices = read_indices(input_boundary_txt)
    standard_boundary_indices = read_indices(standard_boundary_txt)
    # Weight each transition vertex by its relative distance to the two
    # boundary loops: near the input boundary -> keep the input shape.
    weights = cal_weights(input_vertices[adaptation_indices],
                          input_vertices[input_boundary_indices],
                          standard_vertices[standard_boundary_indices])
    adapted_vertices = np.copy(input_vertices)
    w = weights[:, np.newaxis]
    adapted_vertices[adaptation_indices] = ((1 - w) * standard_vertices[adaptation_indices]
                                            + w * input_vertices[adaptation_indices])
    # The standard region is copied from the reference head unchanged.
    adapted_vertices[standard_indices] = standard_vertices[standard_indices]
    # Re-serialize every vertex as an OBJ 'v' line for write_obj.
    output_vertex_lines = ['v %.6f %.6f %.6f' % tuple(v[:3]) for v in adapted_vertices]
    return adapted_vertices, output_vertex_lines, input_other_lines
# --- Driver: region-based blending using Blender-exported index files --------
# The OBJ models here come from the FFHQ pipeline; adjust paths as needed for
# models from another source.
standard_obj_path = 'hifi3dpp_mean_face.obj'  # reference (mean) head OBJ
input_obj_path = '4.obj'                      # generated head OBJ
output_obj_path = input_obj_path.replace('.obj', '_mix2.obj')  # blended result

adapted_vertices, input_vertex_lines, input_other_lines = adapt_blendshapes(
    standard_obj_path, input_obj_path)
write_obj(output_obj_path, adapted_vertices, input_vertex_lines, input_other_lines)
再看一下效果
输入头部(蓝色)与融合头部(红色),两个部分的面部是完全相同的
标准头部(绿色)与融合头部(红色),两个部分耳朵,后脑勺,脖颈部分完全相同
尝试了一下不同人种的效果(图片来自于百度搜索),黄种人(红),白种人(蓝),黑种人(绿)可以正常的融合到标准头部上,也没有明显的突变顶点
三个人种的面部不同,后脑勺和脖颈处的顶点是完全一致的
这个算法主要只适用于模型输出有细微差别,需要进行对齐的,如果头部偏移过大的话,可能会出问题。