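"""Per-view mesh visibility / occlusion flagging.

Loads a reconstructed mesh (repair.ply) and COLMAP camera data, renders a depth
image for each registered view with Open3D's offscreen renderer, and marks which
mesh faces are visible from that view, eroding the face mask by a few adjacency
rings before returning it. process() returns a dict mapping each image name to
the indices of its visible faces.
"""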
import open3d as o3d
import os
import numpy as np
from scipy.spatial.transform import Rotation
import sys
sys.path.append("/home/algo/Documents/mask_face_occlusion/")
from colmap_loader import read_cameras_text, read_images_text, read_int_text, write_int_text, read_indices_from_file
from utils.get_pose_matrix import get_w2c
import argparse
import matplotlib.pyplot as plt
import collections


class ModelProcessor:
    def __init__(self):
        # argv = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else []
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--id",
            required=True,
        )
        # parser.add_argument(
        #     "--mask_image",
        #     type=str,
        #     default="74_8"  # 74_8 123_8 75_8 72_8 44_8 104_8 96_8
        # )
        args = parser.parse_args()
        self.id = args.id
        # self.mask_image = args.mask_image
        self.mesh = None
        self.face_adjacency = None  # filled in by _build_face_adjacency()
        self.asset_dir = f"/home/algo/Documents/openMVS/data/{self.id}"
        self.pose_path = f"{self.asset_dir}/sparse/"
        if not os.path.exists(self.pose_path):
            raise FileNotFoundError(f"Camera data not found: {self.pose_path}")
    def load_model(self):
        """Load and initialize the 3D model."""
        model_path = f"{self.asset_dir}/repair.ply"
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model file not found: {model_path}")
        print(model_path)
        mesh_native = o3d.io.read_triangle_mesh(model_path, enable_post_processing=False)
        # self.mesh = o3d.io.read_triangle_mesh(model_path, enable_post_processing=False)
        self.mesh = mesh_native
        print("Vertex count before merging duplicates:", len(mesh_native.vertices))
        # self.mesh = mesh_native.merge_close_vertices(eps=1e-6)
        vertices2 = np.asarray(self.mesh.vertices)
        print("Vertex count after merging duplicates:", len(vertices2))
        vertices2_sorted = sorted(
            vertices2.tolist(),
            key=lambda x: (x[0], x[1], x[2])
        )
        if not self.mesh.has_vertex_colors():
            num_vertices = len(self.mesh.vertices)
            self.mesh.vertex_colors = o3d.utility.Vector3dVector(
                np.ones((num_vertices, 3))
            )
        self.uv_array = np.asarray(self.mesh.triangle_uvs)
        # print(f"UV coordinate shape: {self.uv_array.shape}, {self.uv_array[0][1]}")
        self._build_face_adjacency()
    def _build_face_adjacency(self):
        if len(self.mesh.triangles) == 0:
            return
        triangles = np.asarray(self.mesh.triangles)
        num_faces = len(triangles)
        self.face_adjacency = [[] for _ in range(num_faces)]
        # Map each edge to the faces that contain it
        edge_face_map = {}
        for idx, tri in enumerate(triangles):
            # The three edges (vertices sorted so each edge gets a unique key)
            edges = [
                tuple(sorted([tri[0], tri[1]])),
                tuple(sorted([tri[1], tri[2]])),
                tuple(sorted([tri[2], tri[0]]))
            ]
            for edge in edges:
                if edge not in edge_face_map:
                    edge_face_map[edge] = []
                edge_face_map[edge].append(idx)
        # Connect faces that share an edge
        for edge, faces in edge_face_map.items():
            if len(faces) > 1:  # only shared edges
                for i in faces:
                    for j in faces:
                        if i != j and j not in self.face_adjacency[i]:
                            self.face_adjacency[i].append(j)
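    # _expand_face_visibility: dilate the visible-face set by up to
    # self.expand_radius rings of adjacent faces, via breadth-first search
    # over self.face_adjacency.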
    def _expand_face_visibility(self, face_visibility):
        if self.face_adjacency is None:
            return face_visibility.copy()
        # Multi-ring expansion via breadth-first search over the adjacency graph
        expanded = face_visibility.copy()
        visited = set()
        queue = collections.deque()
        # Seed the queue with every face that is already visible
        for face_idx, is_visible in enumerate(face_visibility):
            if is_visible:
                queue.append((face_idx, 0))  # (face index, current expansion depth)
                visited.add(face_idx)
        self.expand_radius = 10
        # Breadth-first expansion
        while queue:
            current_idx, current_radius = queue.popleft()
            # Keep expanding while the current depth is below the target radius
            if current_radius < self.expand_radius:
                for neighbor_idx in self.face_adjacency[current_idx]:
                    # Only handle faces that have not been visited yet
                    if neighbor_idx not in visited:
                        expanded[neighbor_idx] = True
                        visited.add(neighbor_idx)
                        # Enqueue the neighbour one level deeper
                        queue.append((neighbor_idx, current_radius + 1))
        return expanded
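    # _shrink_face_visibility: the inverse operation - peel `shrink_radius`
    # rings of boundary faces off the visible set, one ring at a time.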
    def _shrink_face_visibility(self, face_visibility, shrink_radius=1):
        if self.face_adjacency is None or shrink_radius == 0:
            return face_visibility.copy()
        # Work on a copy of the current visibility
        current_visible = face_visibility.copy()
        # Queue of boundary faces
        boundary_queue = collections.deque()
        # Initialisation: find every boundary face (visible, with at least one invisible neighbour)
        for idx, is_visible in enumerate(current_visible):
            if not is_visible:
                continue
            for neighbor_idx in self.face_adjacency[idx]:
                if not current_visible[neighbor_idx]:
                    boundary_queue.append((idx, 1))  # (face index, current ring)
                    break
        # Peel off rings layer by layer
        removed = set()
        while boundary_queue:
            idx, current_radius = boundary_queue.popleft()
            # Skip faces that have already been removed
            if idx in removed:
                continue
            # Remove the face while we are still within the target number of rings
            if current_radius <= shrink_radius:
                current_visible[idx] = False
                removed.add(idx)
                # Check neighbours that are still visible and not yet removed
                for neighbor_idx in self.face_adjacency[idx]:
                    if current_visible[neighbor_idx] and neighbor_idx not in removed:
                        # The neighbour may now be a boundary face (since this face was removed)
                        is_boundary = False
                        for n_neighbor_idx in self.face_adjacency[neighbor_idx]:
                            if not current_visible[n_neighbor_idx]:
                                is_boundary = True
                                break
                        if is_boundary:
                            boundary_queue.append((neighbor_idx, current_radius + 1))
        return current_visible
    @staticmethod
    def qvec2rotmat(qvec):
        """Convert a COLMAP quaternion (w, x, y, z) to a rotation matrix."""
        return Rotation.from_quat([qvec[1], qvec[2], qvec[3], qvec[0]]).as_matrix()
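    # _compute_vertex_in_frustum: transform all mesh vertices into the camera
    # frame, cull those outside the view frustum, and classify the rest as
    # visible or occluded against the rendered depth pyramid. Returns
    # (per-vertex visible list, dilated per-vertex occlusion list).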
    def _compute_vertex_in_frustum(self, fx, fy, cx, cy, R, eye, height, width, depth_map, qvec, tvec):
        """Hierarchical occlusion test against a depth pyramid."""
        # Coordinate transform (R is recomputed from qvec; the eye argument is unused)
        R = self.qvec2rotmat(qvec)
        w2c = get_w2c(qvec, tvec)
        vertices = np.asarray(self.mesh.vertices, dtype=np.float32)
        vertices_homo = np.hstack([vertices, np.ones((len(vertices), 1))])
        vertices_cam = (w2c @ vertices_homo.T).T[:, :3]
        # Fast frustum culling
        valid_z = vertices_cam[:, 2] > 0
        tan_fov_x = (width / 2) / fx
        tan_fov_y = (height / 2) / fy
        x_ratio = vertices_cam[:, 0] / vertices_cam[:, 2]
        y_ratio = vertices_cam[:, 1] / vertices_cam[:, 2]
        frustum_mask = valid_z & (np.abs(x_ratio) <= tan_fov_x) & (np.abs(y_ratio) <= tan_fov_y)
        # Build the depth pyramid
        depth_pyramid = self._build_depth_pyramid(depth_map)
        # Multi-level occlusion test
        visible_mask, occlusion_mask = self._hierarchical_occlusion_test(
            vertices_cam[frustum_mask],
            depth_pyramid,
            (fx, fy, cx, cy),
            (height, width)
        )
        final_visible = np.zeros(len(vertices), dtype=bool)
        final_visible[frustum_mask] = visible_mask
        final_occlusion = np.zeros(len(vertices), dtype=bool)
        final_occlusion[frustum_mask] = occlusion_mask
        return final_visible.tolist(), self._occlusion_expansion(final_occlusion, vertices)
    def _build_depth_pyramid2(self, depth_map, levels=4):
        """Build a depth-map pyramid (simple 2x2 averaging; assumes even dimensions)."""
        pyramid = [depth_map.copy()]
        current_level = depth_map
        for _ in range(levels - 1):
            current_level = 0.25 * (current_level[::2, ::2] +
                                    current_level[1::2, ::2] +
                                    current_level[::2, 1::2] +
                                    current_level[1::2, 1::2])
            pyramid.append(current_level)
        return pyramid
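    # _build_depth_pyramid: like _build_depth_pyramid2 above, but crops each
    # level to even dimensions first so the 2x2 averaging never hits a shape
    # mismatch on odd-sized depth maps.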
    def _build_depth_pyramid(self, depth_map, levels=4):
        pyramid = [depth_map.copy()]
        current_level = depth_map
        for _ in range(levels - 1):
            h, w = current_level.shape
            # Make sure both dimensions are divisible by 2
            if h % 2 != 0 or w % 2 != 0:
                current_level = current_level[:h // 2 * 2, :w // 2 * 2]  # crop to the nearest even size
            # Extra broadcasting-compatibility check
            if current_level[::2, ::2].shape != current_level[1::2, ::2].shape:
                current_level = current_level[:h // 2 * 2, :w // 2 * 2]
            current_level = 0.25 * (
                current_level[::2, ::2] +
                current_level[1::2, ::2] +
                current_level[::2, 1::2] +
                current_level[1::2, 1::2]
            )
            pyramid.append(current_level)
        return pyramid
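    # _hierarchical_occlusion_test: project the camera-space vertices into every
    # pyramid level (coarsest first) and OR the per-level visible/occluded
    # decisions together, with a depth tolerance that doubles per level.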
    def _hierarchical_occlusion_test(self, vertices_cam, depth_pyramid, intrinsics, img_size):
        """Hierarchical occlusion test (guarded against degenerate inputs)."""
        fx, fy, cx, cy = intrinsics
        height, width = img_size
        # 1. Filter out invalid vertices
        valid_mask = vertices_cam[:, 2] > 1e-6
        vertices_valid = vertices_cam[valid_mask]
        if len(vertices_valid) == 0:
            return (np.zeros(len(vertices_cam), dtype=bool),
                    np.zeros(len(vertices_cam), dtype=bool))
        visible = np.zeros(len(vertices_valid), dtype=bool)
        occlusion = np.zeros(len(vertices_valid), dtype=bool)
        # 2. Test against each pyramid level
        for level in reversed(range(len(depth_pyramid))):
            scale = 2 ** level
            current_depth = depth_pyramid[level]
            h, w = current_depth.shape
            # Build the scaled intrinsics matrix, guarding against division by zero
            K = np.array([
                [max(fx / (scale + 1e-6), 1e-6), 0, (cx - 0.5) / scale + 0.5],
                [0, max(fy / (scale + 1e-6), 1e-6), (cy - 0.5) / scale + 0.5],
                [0, 0, 1]
            ], dtype=np.float32)
            # Project into the image plane
            uv_homo = (K @ vertices_valid.T).T
            uv = uv_homo[:, :2] / uv_homo[:, 2:3]
            # Clamp to the image bounds
            u = np.clip(uv[:, 0], 0.0, float(w - 1))
            v = np.clip(uv[:, 1], 0.0, float(h - 1))
            # Convert to integer pixel indices
            u_idx = np.clip(np.floor(u).astype(np.int32), 0, w - 1)
            v_idx = np.clip(np.floor(v).astype(np.int32), 0, h - 1)
            # Sample the rendered depth
            depth_vals = current_depth[v_idx, u_idx]
            # Depth comparison with a per-level tolerance
            level_tol = 0.0008 * (2 ** level)  # 0.005 0.0008
            visible |= (vertices_valid[:, 2] <= (depth_vals + level_tol))
            occlusion |= (vertices_valid[:, 2] > (depth_vals + level_tol))
        # 3. Map results back to the full vertex set
        final_visible = np.zeros(len(vertices_cam), dtype=bool)
        final_visible[valid_mask] = visible
        final_occlusion = np.zeros(len(vertices_cam), dtype=bool)
        final_occlusion[valid_mask] = occlusion
        return final_visible, final_occlusion
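    # _occlusion_expansion: grow the occluded-vertex set to every vertex that
    # falls in the same or an adjacent grid cell (cell size = 2 * radius) as an
    # occluded vertex, using a hash of quantised coordinates.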
    def _occlusion_expansion(self, occlusion_mask, vertices, radius=0.0008):
        """Fast occlusion dilation using a spatial hash grid."""
        from collections import defaultdict
        # Build the spatial hash
        grid_size = radius * 2
        hash_table = defaultdict(list)
        # Quantise vertex coordinates to grid cells
        quantized = (vertices / grid_size).astype(int)
        for idx, (x, y, z) in enumerate(quantized):
            hash_table[(x, y, z)].append(idx)
        # Dilate the occluded region
        dilated_mask = occlusion_mask.copy()
        occluded_indices = np.where(occlusion_mask)[0]
        for idx in occluded_indices:
            x, y, z = quantized[idx]
            # Query the 27 neighbouring cells
            for dx in (-1, 0, 1):
                for dy in (-1, 0, 1):
                    for dz in (-1, 0, 1):
                        neighbor_cell = (x + dx, y + dy, z + dz)
                        dilated_mask[hash_table.get(neighbor_cell, [])] = True
        return dilated_mask.tolist()
    def _gen_depth_image(self, cam_data, render):
        """Render a depth image for the given camera."""
        qvec = cam_data['qvec']
        tvec = cam_data['tvec']
        fx = cam_data['fx']
        fy = cam_data['fy']
        cx = cam_data['cx']
        cy = cam_data['cy']
        width = cam_data['width']
        height = cam_data['height']
        intrinsics = o3d.camera.PinholeCameraIntrinsic(
            width, height, fx=fx, fy=fy, cx=cx, cy=cy)
        w2c = get_w2c(qvec, tvec)
        # print(np.linalg.inv(w2c))
        # Configure the renderer
        render.setup_camera(intrinsics, w2c)
        depth = render.render_to_depth_image(z_in_view_space=True)
        return np.asarray(depth)
    @staticmethod
    def sort_vertices(vertices_original):
        # Unused helper: expects vertex objects with a .co attribute
        # (e.g. Blender MeshVertex), not Open3D vertices.
        return sorted(
            (v for v in vertices_original),
            key=lambda v: (v.co.x, v.co.y, v.co.z)
        )
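    # _flag_model: end-to-end visibility for one view - render a depth image,
    # classify vertices, lift the result to faces (a face counts as visible if
    # any of its three vertices is visible), then shrink the face mask by 6 rings.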
    def _flag_model(self, camera_data, face_points):
        """Flag visible vertices/faces for one camera view."""
        vertex_visible = []
        vertex_occlusion = []
        depth_images = []
        render = o3d.visualization.rendering.OffscreenRenderer(camera_data['width'], camera_data['height'])
        material = o3d.visualization.rendering.MaterialRecord()
        render.scene.add_geometry("mesh", self.mesh, material)
        # Render the depth image
        depth_image = self._gen_depth_image(camera_data, render)
        # Compute visibility
        R = self.qvec2rotmat(camera_data['qvec']).T
        eye = -R @ camera_data['tvec']
        # eye = camera_data['tvec']
        final_visible_list, final_occlusion_list = self._compute_vertex_in_frustum(
            camera_data['fx'], camera_data['fy'],
            camera_data['cx'], camera_data['cy'],
            R, eye,
            camera_data['height'], camera_data['width'],
            depth_image, camera_data['qvec'], camera_data['tvec']
        )
        print("_flag_model", len(final_occlusion_list), len(self.mesh.vertices), len(self.mesh.vertex_colors))
        """
        vertices = np.asarray(self.mesh.vertices)
        vertex_index_map = {tuple(v.tolist()): i for i, v in enumerate(vertices)}
        vertex_colors = np.asarray(self.mesh.vertex_colors)
        if face_points is None:
            for vertex_id, coord in enumerate(self.mesh.vertices):
                if final_occlusion_list[vertex_id]:
                    vertex_colors[vertex_id] = [1.0, 0.0, 0.0]
            # Save the final model
            output_path = f"{self.asset_dir}/mesh_{self.id}_遮挡判断.ply"
            o3d.io.write_triangle_mesh(output_path, self.mesh)
            print(f"Processing completed. Results saved to {output_path}")
        else:
            list_id = []
            # sorted_verts = self.sort_vertices(self.mesh.vertices)
            sorted_verts = sorted(
                (tuple(v.tolist()) for v in vertices),
                key=lambda v: (v[0], v[1], v[2])
            )
            dict_s_o = {}
            dict_o_s = {}
            for sorted_idx, sorted_v in enumerate(sorted_verts):
                original_idx = vertex_index_map[sorted_v]
                dict_s_o[sorted_idx] = original_idx
                dict_o_s[original_idx] = sorted_idx
            for vertex_id, coord in enumerate(self.mesh.vertices):
                # print(vertex_id, coord)
                if final_occlusion_list[vertex_id]:
                    if dict_o_s[vertex_id] in face_points:
                        list_id.append(dict_o_s[vertex_id])
                        vertex_colors[vertex_id] = [1.0, 0.0, 0.0]
            write_int_text(list_id, os.path.join(self.pose_path, "face_points_mask.txt"))
            # Save the final model
            output_path = f"{self.asset_dir}/mesh_{self.id}_脸部遮挡判断.ply"
            o3d.io.write_triangle_mesh(output_path, self.mesh)
            print(f"Processing completed. Results saved to {output_path}")
        """
        # Get the triangle face array
        triangles = np.asarray(self.mesh.triangles)
        face_visible_bitmap = np.zeros(len(triangles), dtype=bool)
        # Iterate over all faces
        for face_idx, face in enumerate(triangles):
            v0, v1, v2 = face
            face_visible_bitmap[face_idx] = any([  # any / all
                final_visible_list[v0],
                final_visible_list[v1],
                final_visible_list[v2]
            ])
        # return face_visible_bitmap
        # expanded_visibility = self._expand_face_visibility(face_visible_bitmap)
        # return expanded_visibility
        shrunk_visibility = self._shrink_face_visibility(face_visible_bitmap, 6)  # 6 10
        return shrunk_visibility
"""
def _mask_face_occlusion(self):
# 读取相机数据
cameras = read_cameras_text(os.path.join(self.pose_path, "cameras.txt"))
images = read_images_text(os.path.join(self.pose_path, "images.txt"))
# cameras = read_cameras_text(os.path.join(self.pose_path, "backup_cameras.txt"))
# images = read_images_text(os.path.join(self.pose_path, "backup_images.txt"))
face_points_sorted_path = os.path.join(self.pose_path, "face_points_sorted.txt")
print("face_points_sorted_path=", face_points_sorted_path)
#face_points = read_int_text(face_points_sorted_path)
face_points = read_indices_from_file(face_points_sorted_path)
# face_points = {}
camera_data = {}
for img in images.values():
if self.mask_image == img.name[:-4]:
camera = cameras[img.camera_id]
camera_data = {
"qvec": img.qvec,
"tvec": img.tvec,
"fx": camera.params[0],
"fy": camera.params[1],
"cx": camera.params[2],
"cy": camera.params[3],
"width": camera.width,
"height": camera.height,
"name": img.name[:-4]
}
# print(face_points)
self._flag_model(camera_data, face_points)
"""
    def _mask_occlusion(self):
        # Read camera data
        cameras = read_cameras_text(os.path.join(self.pose_path, "cameras.txt"))
        images = read_images_text(os.path.join(self.pose_path, "images.txt"))
        camera_data = {}
        """
        for img in images.values():
            if self.mask_image == img.name[:-4]:
                camera = cameras[img.camera_id]
                camera_data = {
                    "qvec": img.qvec,
                    "tvec": img.tvec,
                    "fx": camera.params[0],
                    "fy": camera.params[1],
                    "cx": camera.params[2],
                    "cy": camera.params[3],
                    "width": camera.width,
                    "height": camera.height,
                    "name": img.name[:-4]
                }
        return self._flag_model(camera_data, None)
        """
        visible_faces_dict = {}
        for img in images.values():
            camera = cameras[img.camera_id]
            camera_data = {
                "qvec": img.qvec,
                "tvec": img.tvec,
                "fx": camera.params[0],
                "fy": camera.params[1],
                "cx": camera.params[2],
                "cy": camera.params[3],
                "width": camera.width,
                "height": camera.height,
                "name": img.name[:-4]
            }
            img_name = img.name[:-4]
            print("img_name=", img_name)
            # if (img_name != "72_2" and img_name != "82_2" and img_name != "83_2"):  # 82_2 72_2
            # if (img_name != "82_2"):
            #     continue
            face_visibility = self._flag_model(camera_data, None)
            visible_faces_dict[img.name[:-4]] = np.where(face_visibility)[0].tolist()
        return visible_faces_dict
    def process(self):
        print("process")
        self.load_model()
        try:
            # Run the per-camera occlusion flagging
            # self._mask_face_occlusion()
            return self._mask_occlusion()
        except Exception as e:
            print(f"Error during processing: {str(e)}")
            raise


if __name__ == "__main__":
    ModelProcessor().process()
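# Usage sketch: the per-view visibility returned by process() could be
# persisted for downstream use, e.g.
#
#   import json
#   visible = ModelProcessor().process()  # run as: python <script> --id <scan_id>
#   with open("visible_faces.json", "w") as fp:
#       json.dump(visible, fp)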