@@ -1,3 +1,4 @@
import time
import open3d as o3d
import os
import numpy as np
@@ -9,6 +10,9 @@ from get_pose_matrix import get_w2c
import argparse
import matplotlib.pyplot as plt
import collections
import torch
import torch.nn.functional as F
from torch.utils.dlpack import to_dlpack, from_dlpack

class ModelProcessor:
    def __init__(self):
@@ -54,6 +58,10 @@ class ModelProcessor:
        if not os.path.exists(self.pose_path):
            raise FileNotFoundError(f"Camera data not found: {self.pose_path}")

        # GPU device setup
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print(f"Using device: {self.device}")

        self.mesh = None

    def load_model(self):
@@ -89,9 +97,370 @@ class ModelProcessor:
        self.uv_array = np.asarray(self.mesh.triangle_uvs)
        # print(f"UV array shape: {self.uv_array.shape}, {self.uv_array[0][1]}")
        #"""
        #"""
        # Move the mesh data onto the GPU
        vertices = np.asarray(self.mesh.vertices, dtype=np.float32)
        triangles = np.asarray(self.mesh.triangles, dtype=np.int32)

        # Convert to PyTorch tensors and transfer them to the GPU
        self.vertices_tensor = torch.from_numpy(vertices).to(self.device)
        self.triangles_tensor = torch.from_numpy(triangles).to(self.device)

        print(f"Loaded {len(vertices)} vertices, {len(triangles)} triangles ({len(self.triangles_tensor)} rows in triangles_tensor)")

        self._build_face_adjacency_gpu()
        #"""
        # self._build_face_adjacency()

        if not self.mesh.has_vertex_colors():
            num_vertices = len(self.mesh.vertices)
            self.mesh.vertex_colors = o3d.utility.Vector3dVector(
                np.ones((num_vertices, 3))
            )

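    # _build_face_adjacency_gpu builds, for every face, the list of faces that
    # share an edge with it: each triangle edge is stored as a sorted vertex
    # pair in edge_face_map, and any edge referenced by more than one face
    # links those faces as neighbours in self.face_adjacency.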
    def _build_face_adjacency_gpu(self):
        """Optimized face adjacency construction for the GPU-resident mesh."""
        if len(self.triangles_tensor) == 0:
            return

        triangles = self.triangles_tensor.cpu().numpy()  # process on the CPU
        num_faces = len(triangles)

        # Build the edge-to-face map
        edge_face_map = {}

        for face_idx, tri in enumerate(triangles):
            # The three edges of the triangle (sorted vertices make each edge unique)
            edges = [
                tuple(sorted([tri[0], tri[1]])),
                tuple(sorted([tri[1], tri[2]])),
                tuple(sorted([tri[2], tri[0]]))
            ]

            for edge in edges:
                if edge not in edge_face_map:
                    edge_face_map[edge] = []
                edge_face_map[edge].append(face_idx)

        # Build the adjacency lists
        self.face_adjacency = [[] for _ in range(num_faces)]

        adjacency_count = 0
        for edge, faces in edge_face_map.items():
            if len(faces) > 1:  # only shared edges matter
                for i in faces:
                    for j in faces:
                        if i != j:
                            if j not in self.face_adjacency[i]:
                                self.face_adjacency[i].append(j)
                                adjacency_count += 1

        print("Face adjacency construction finished:")
        print(f"- total faces: {num_faces}")
        print(f"- total edges: {len(edge_face_map)}")
        print(f"- shared edges: {len([f for f in edge_face_map.values() if len(f) > 1])}")
        print(f"- adjacency pairs: {adjacency_count}")

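    # _build_depth_pyramid_gpu downsamples the rendered depth map with 2x2
    # average pooling `levels - 1` times so that coarser levels can be sampled
    # cheaply during the hierarchical occlusion test. Note that average pooling
    # blends foreground and background depths along silhouettes; the occlusion
    # test compensates with a per-level depth tolerance.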
    def _build_depth_pyramid_gpu(self, depth_map, levels=4):
        """Build a depth pyramid on the GPU."""
        if not isinstance(depth_map, torch.Tensor):
            depth_tensor = torch.from_numpy(depth_map).float().to(self.device)
        else:
            depth_tensor = depth_map.float()

        pyramid = [depth_tensor]
        current_level = depth_tensor

        for _ in range(levels - 1):
            # Downsample with average pooling
            current_level = current_level.unsqueeze(0).unsqueeze(0)  # add batch and channel dims
            current_level = F.avg_pool2d(current_level, kernel_size=2, stride=2)
            current_level = current_level.squeeze(0).squeeze(0)
            pyramid.append(current_level)

        return pyramid

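    # _hierarchical_occlusion_test_gpu projects every camera-space vertex into
    # each pyramid level with scaled pinhole intrinsics (u = fx * x / z + cx,
    # v = fy * y / z + cy), samples the stored depth at that pixel, and marks
    # the vertex visible when its z lies within a tolerance of the sampled
    # depth and occluded when it lies beyond it; the tolerance grows with the
    # pyramid level.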
    def _hierarchical_occlusion_test_gpu(self, vertices_cam, depth_pyramid, intrinsics, img_size):
        """Hierarchical occlusion test on the GPU using direct projection."""
        fx, fy, cx, cy = intrinsics
        height, width = img_size

        # Filter out invalid vertices (at or behind the camera plane)
        valid_mask = vertices_cam[:, 2] > 1e-6
        vertices_valid = vertices_cam[valid_mask]

        if len(vertices_valid) == 0:
            return (torch.zeros(len(vertices_cam), dtype=torch.bool, device=self.device),
                    torch.zeros(len(vertices_cam), dtype=torch.bool, device=self.device))

        visible = torch.zeros(len(vertices_valid), dtype=torch.bool, device=self.device)
        occlusion = torch.zeros(len(vertices_valid), dtype=torch.bool, device=self.device)

        # Process all pyramid levels, coarsest first
        for level in reversed(range(len(depth_pyramid))):
            scale = 2 ** level
            current_depth = depth_pyramid[level]
            h, w = current_depth.shape

            # Project directly instead of via a full matrix multiply
            x = vertices_valid[:, 0]
            y = vertices_valid[:, 1]
            z = vertices_valid[:, 2]

            # Intrinsics scaled to this pyramid level
            fx_scaled = max(fx / (scale + 1e-6), 1e-6)
            fy_scaled = max(fy / (scale + 1e-6), 1e-6)
            cx_scaled = (cx - 0.5) / scale + 0.5
            cy_scaled = (cy - 0.5) / scale + 0.5

            # Pinhole projection
            u = (x / z) * fx_scaled + cx_scaled
            v = (y / z) * fy_scaled + cy_scaled

            # Clamp to the image bounds
            u = torch.clamp(u, 0.0, float(w - 1))
            v = torch.clamp(v, 0.0, float(h - 1))

            # Convert to integer pixel indices
            u_idx = torch.clamp(torch.floor(u).long(), 0, w - 1)
            v_idx = torch.clamp(torch.floor(v).long(), 0, h - 1)

            # Sample the depth values in one batch
            depth_vals = current_depth[v_idx, u_idx]

            # Batched depth comparison
            level_tol = 0.0008 * (2 ** level)
            visible |= (z <= (depth_vals + level_tol))
            occlusion |= (z > (depth_vals + level_tol))

        # Map the results back to the full vertex set
        final_visible = torch.zeros(len(vertices_cam), dtype=torch.bool, device=self.device)
        final_visible[valid_mask] = visible

        final_occlusion = torch.zeros(len(vertices_cam), dtype=torch.bool, device=self.device)
        final_occlusion[valid_mask] = occlusion

        return final_visible, final_occlusion

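    # _compute_vertex_in_frustum_gpu transforms the mesh vertices into camera
    # space with the world-to-camera matrix from get_w2c, culls vertices
    # outside the field of view via |x/z| <= (width/2)/fx and
    # |y/z| <= (height/2)/fy, and then runs the hierarchical depth test above.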
    def _compute_vertex_in_frustum_gpu(self, fx, fy, cx, cy, R, eye, height, width, depth_map, qvec, tvec):
        """Frustum culling and occlusion detection on the GPU."""
        print("Starting _compute_vertex_in_frustum_gpu")

        # Use get_w2c directly to avoid recomputing the pose
        w2c = get_w2c(qvec, tvec)

        # Make sure w2c is a 4x4 matrix
        if w2c.shape != (4, 4):
            if w2c.shape == (3, 4):
                w2c_4x4 = np.eye(4)
                w2c_4x4[:3, :] = w2c
                w2c = w2c_4x4
            else:
                raise ValueError(f"w2c matrix has unexpected shape: {w2c.shape}")

        # Work with the GPU tensors
        vertices = self.vertices_tensor.float()
        ones = torch.ones(len(vertices), 1, device=self.device)
        vertices_homo = torch.cat([vertices, ones], dim=1)

        w2c_tensor = torch.tensor(w2c, device=self.device, dtype=torch.float32)
        # Single matrix multiply into camera space
        vertices_cam_homo = (w2c_tensor @ vertices_homo.T).T
        vertices_cam = vertices_cam_homo[:, :3]

        self._build_face_adjacency()
        # Fast frustum culling
        valid_z = vertices_cam[:, 2] > 0
        tan_fov_x = (width / 2) / fx
        tan_fov_y = (height / 2) / fy
        x_ratio = vertices_cam[:, 0] / vertices_cam[:, 2]
        y_ratio = vertices_cam[:, 1] / vertices_cam[:, 2]
        frustum_mask = valid_z & (torch.abs(x_ratio) <= tan_fov_x) & (torch.abs(y_ratio) <= tan_fov_y)

        # Build the depth pyramid
        depth_pyramid = self._build_depth_pyramid_gpu(depth_map)

        # Multi-level occlusion test
        visible_mask, occlusion_mask = self._hierarchical_occlusion_test_gpu(
            vertices_cam, depth_pyramid, (fx, fy, cx, cy), (height, width)
        )

        final_visible = torch.zeros(len(vertices), dtype=torch.bool, device=self.device)
        final_visible[frustum_mask] = visible_mask[frustum_mask]

        final_occlusion = torch.zeros(len(vertices), dtype=torch.bool, device=self.device)
        final_occlusion[frustum_mask] = occlusion_mask[frustum_mask]

        # Convert back to a Python list on the CPU before returning
        # return (final_visible.cpu().numpy().tolist(),
        #         self._occlusion_expansion_gpu(final_occlusion, vertices.cpu().numpy()))
        return final_visible.cpu().numpy().tolist()

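    # _occlusion_expansion_gpu dilates the occluded-vertex mask: vertices are
    # quantized into a uniform grid with cell size 2 * radius, indexed in a
    # hash table, and every vertex found in the 3x3x3 neighbourhood of an
    # occluded vertex's cell is marked occluded as well.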
    def _occlusion_expansion_gpu(self, occlusion_mask, vertices, radius=0.0008):
        """Spatial-hash occlusion expansion."""
        if not isinstance(occlusion_mask, torch.Tensor):
            occlusion_tensor = torch.from_numpy(occlusion_mask).to(self.device)
            vertices_tensor = torch.from_numpy(vertices).to(self.device)
        else:
            occlusion_tensor = occlusion_mask
            vertices_tensor = torch.from_numpy(vertices).to(self.device)

        # Build the spatial hash
        grid_size = radius * 2
        quantized = (vertices_tensor / grid_size).long()

        # Hash table mapping grid cells to vertex indices
        from collections import defaultdict
        hash_table = defaultdict(list)

        # Move the data back to the CPU for hashing (doing this on the GPU is more involved)
        quantized_cpu = quantized.cpu().numpy()
        for idx, (x, y, z) in enumerate(quantized_cpu):
            hash_table[(x, y, z)].append(idx)

        # Dilate the occluded region
        dilated_mask = occlusion_tensor.cpu().numpy().copy()
        occluded_indices = np.where(occlusion_tensor.cpu().numpy())[0]

        for idx in occluded_indices:
            x, y, z = quantized_cpu[idx]
            for dx in (-1, 0, 1):
                for dy in (-1, 0, 1):
                    for dz in (-1, 0, 1):
                        neighbor_cell = (x + dx, y + dy, z + dz)
                        for neighbor_idx in hash_table.get(neighbor_cell, []):
                            dilated_mask[neighbor_idx] = True

        return dilated_mask.tolist()

    def _gen_depth_image_gpu(self, cam_data, render):
        """Generate the depth image (kept as-is; the Open3D renderer may not support the GPU)."""
        # Open3D's offscreen renderer currently runs mainly on the CPU
        return self._gen_depth_image(cam_data, render)

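    # _flag_model_gpu handles one camera view: it renders a depth map with the
    # offscreen renderer, computes per-vertex visibility on the GPU, marks a
    # face visible when any of its three vertices is visible, and then derives
    # the edge bands by shrinking/expanding that face-visibility bitmap.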
    def _flag_model_gpu(self, camera_data, face_points=None):

        # Make sure the depth map is generated the same way as in the CPU path
        render = o3d.visualization.rendering.OffscreenRenderer(
            camera_data['width'], camera_data['height'])

        material = o3d.visualization.rendering.MaterialRecord()
        render.scene.add_geometry("mesh", self.mesh, material)

        # Generate the depth image - keep it consistent with the CPU version
        depth_image = self._gen_depth_image_gpu(camera_data, render)

        # Same parameter computation as the CPU version
        R = self.qvec2rotmat(camera_data['qvec']).T
        eye = -R @ camera_data['tvec']

        # final_visible_list, final_occlusion_list = self._compute_vertex_in_frustum_gpu(
        final_visible_list = self._compute_vertex_in_frustum_gpu(
            camera_data['fx'], camera_data['fy'],
            camera_data['cx'], camera_data['cy'],
            R, eye,
            camera_data['height'], camera_data['width'],
            depth_image, camera_data['qvec'], camera_data['tvec']
        )

        # Keep the tensors on the same device
        final_visible_tensor = torch.tensor(final_visible_list, device=self.device)
        triangles_tensor = self.triangles_tensor  # reuse the tensor already on the GPU

        # Vectorized face visibility
        v0_indices = triangles_tensor[:, 0]
        v1_indices = triangles_tensor[:, 1]
        v2_indices = triangles_tensor[:, 2]

        v0_visible = final_visible_tensor[v0_indices]
        v1_visible = final_visible_tensor[v1_indices]
        v2_visible = final_visible_tensor[v2_indices]

        face_visible = v0_visible | v1_visible | v2_visible

        # Same post-processing as the CPU version
        shrunk_visibility = self._shrink_face_visibility(face_visible.cpu().numpy(), 6)
        expanded_visibility = self._expand_face_visibility(face_visible.cpu().numpy(), 30)
        shrunk_visibility2 = self._shrink_face_visibility(face_visible.cpu().numpy(), 50)
        expanded_edge = expanded_visibility & ~shrunk_visibility2
        delete_edge = face_visible.cpu().numpy() & ~shrunk_visibility

        return shrunk_visibility, expanded_edge, delete_edge

""" |
|
|
|
|
def _gen_depth_image_gpu(self, cam_data, render): |
|
|
|
|
# 复制CPU版本的逻辑 |
|
|
|
|
qvec = cam_data['qvec'] |
|
|
|
|
tvec = cam_data['tvec'] |
|
|
|
|
fx = cam_data['fx'] |
|
|
|
|
fy = cam_data['fy'] |
|
|
|
|
cx = cam_data['cx'] |
|
|
|
|
cy = cam_data['cy'] |
|
|
|
|
width = cam_data['width'] |
|
|
|
|
height = cam_data['height'] |
|
|
|
|
|
|
|
|
|
intrinsics = o3d.camera.PinholeCameraIntrinsic( |
|
|
|
|
width, height, fx=fx, fy=fy, cx=cx, cy=cy) |
|
|
|
|
w2c = get_w2c(qvec, tvec) |
|
|
|
|
|
|
|
|
|
render.setup_camera(intrinsics, w2c) |
|
|
|
|
depth = render.render_to_depth_image(z_in_view_space=True) |
|
|
|
|
return np.asarray(depth) # 确保返回numpy数组 |
|
|
|
|
""" |
|
|
|
|
|
|
|
|
|
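    # _mask_occlusion_gpu loops over every registered image, packs the pose
    # (qvec, tvec) and intrinsics into camera_data (camera.params is read as
    # fx, fy, cx, cy, i.e. a PINHOLE-style camera model is assumed), runs
    # _flag_model_gpu, and collects the visible/edge/delete-edge face indices
    # per image.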
    def _mask_occlusion_gpu(self):
        """Multi-camera occlusion detection on the GPU."""
        cameras = read_cameras_text(os.path.join(self.pose_path, "cameras.txt"))
        images = read_images_text(os.path.join(self.pose_path, "images.txt"))

        visible_faces_dict = {}
        edge_faces_dict = {}
        delete_edge_faces_dict = {}

        total_start = time.time()

        for n, img in enumerate(images.values()):
            camera = cameras[img.camera_id]
            camera_data = {
                "qvec": img.qvec,
                "tvec": img.tvec,
                "fx": camera.params[0],
                "fy": camera.params[1],
                "cx": camera.params[2],
                "cy": camera.params[3],
                "width": camera.width,
                "height": camera.height,
                "name": img.name[:-4]
            }

            img_name = img.name[:-4]
            print(f"Processing image {img_name} ({n+1}/{len(images)})")
            # if (img_name!="73_8" and img_name!="52_8" and img_name!="62_8"):
            # if (img_name!="52_8" and img_name!="62_8"):
            # if (img_name!="52_8"):
            #     continue

            start_time = time.time()
            face_visibility, face_edge, face_delete_edge = self._flag_model_gpu(camera_data)
            processing_time = time.time() - start_time

            visible_faces = np.where(face_visibility)[0].tolist()
            visible_faces_dict[img_name] = visible_faces
            edge_faces_dict[img_name] = np.where(face_edge)[0].tolist()
            delete_edge_faces_dict[img_name] = np.where(face_delete_edge)[0].tolist()

            print(f"Image {img_name} done in {processing_time:.2f}s, {len(visible_faces)} visible faces")

        total_time = time.time() - total_start
        print(f"All images processed, total time: {total_time:.2f}s")
        print(f"Average time per image: {total_time/len(images):.2f}s")

        return {
            "result1": visible_faces_dict,
            "result2": edge_faces_dict,
            "result3": delete_edge_faces_dict
        }

    #"""
    def _build_face_adjacency(self):
        if not self.mesh.triangles:
@@ -617,53 +986,10 @@ class ModelProcessor:
        )
        print("_flag_model", len(final_occlusion_list), len(self.mesh.vertices), len(self.mesh.vertex_colors))

        """
        vertices = np.asarray(self.mesh.vertices)
        vertex_index_map = {tuple(v.tolist()): i for i, v in enumerate(vertices)}

        vertex_colors = np.asarray(self.mesh.vertex_colors)
        if face_points==None:
            for vertex_id, coord in enumerate(self.mesh.vertices):
                if final_occlusion_list[vertex_id]:
                    vertex_colors[vertex_id] = [1.0, 0.0, 0.0]

            # Save the final model
            output_path = f"{self.asset_dir}/mesh_{self.id}_遮挡判断.ply"
            o3d.io.write_triangle_mesh(output_path, self.mesh)
            print(f"Processing completed. Results saved to {output_path}")
        else:
            list_id = []
            # sorted_verts = self.sort_vertices(self.mesh.vertices)
            sorted_verts = sorted(
                (tuple(v.tolist()) for v in vertices),
                key=lambda v: (v[0], v[1], v[2])
            )
            dict_s_o = {}
            dict_o_s = {}
            for sorted_idx, sorted_v in enumerate(sorted_verts):
                original_idx = vertex_index_map[sorted_v]
                dict_s_o[sorted_idx] = original_idx
                dict_o_s[original_idx] = sorted_idx

            for vertex_id, coord in enumerate(self.mesh.vertices):
                # print(vertex_id, coord)
                if final_occlusion_list[vertex_id]:
                    if dict_o_s[vertex_id] in face_points:
                        list_id.append(dict_o_s[vertex_id])
                        vertex_colors[vertex_id] = [1.0, 0.0, 0.0]

            write_int_text(list_id, os.path.join(self.pose_path, "face_points_mask.txt"))

            # Save the final model
            output_path = f"{self.asset_dir}/mesh_{self.id}_脸部遮挡判断.ply"
            o3d.io.write_triangle_mesh(output_path, self.mesh)
            print(f"Processing completed. Results saved to {output_path}")
        #"""
        # Triangle face array
        triangles = np.asarray(self.mesh.triangles)

        face_visible_bitmap = np.zeros(len(triangles), dtype=bool)
        # face_edge_bitmap = np.zeros(len(triangles), dtype=bool)

        # Iterate over all faces
        for face_idx, face in enumerate(triangles):
@@ -675,175 +1001,12 @@ class ModelProcessor:
                final_visible_list[v2]
            ])

            # threshold = 0.5
            # face_edge_bitmap[face_idx] = all([  # any all
            #     final_vertex_difference_list[v0] < threshold,
            #     final_vertex_difference_list[v1] < threshold,
            #     final_vertex_difference_list[v2] < threshold
            # ])

        # return face_visible_bitmap
        shrunk_visibility = self._shrink_face_visibility(face_visible_bitmap, 6)  # 6 10

        # 16,13; 13,16; 16,16
        expanded_visibility = self._expand_face_visibility(face_visible_bitmap, 30)
        shrunk_visibility2 = self._shrink_face_visibility(face_visible_bitmap, 50)
        expanded_edge = expanded_visibility & ~shrunk_visibility2
        delete_edge = face_visible_bitmap & ~shrunk_visibility

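        # The morphology above turns the raw face-visibility bitmap into bands:
        # delete_edge keeps the visible faces stripped by a 6-step shrink (a
        # thin rim at the visibility boundary), while expanded_edge is the band
        # between a 30-step expansion and a 50-step shrink of the visible region.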
""" |
|
|
|
|
# 创建顶点可见性映射(基于面片可见性) |
|
|
|
|
vertex_visibility = np.zeros(len(self.mesh.vertices), dtype=bool) |
|
|
|
|
# 遍历所有面片,将可见面片的顶点标记为可见 |
|
|
|
|
for face_idx, face in enumerate(triangles): |
|
|
|
|
# if expanded_edge[face_idx] and face_edge_bitmap[face_idx] : |
|
|
|
|
if delete_edge[face_idx] : |
|
|
|
|
vertex_visibility[face[0]] = True |
|
|
|
|
vertex_visibility[face[1]] = True |
|
|
|
|
vertex_visibility[face[2]] = True |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
vertices = np.asarray(self.mesh.vertices) |
|
|
|
|
vertex_index_map = {tuple(v.tolist()): i for i, v in enumerate(vertices)} |
|
|
|
|
|
|
|
|
|
vertex_colors = np.asarray(self.mesh.vertex_colors) |
|
|
|
|
if face_points==None: |
|
|
|
|
for vertex_id, coord in enumerate(self.mesh.vertices): |
|
|
|
|
if vertex_visibility[vertex_id]: |
|
|
|
|
vertex_colors[vertex_id] = [1.0, 0.0, 0.0] |
|
|
|
|
|
|
|
|
|
# 保存最终模型 |
|
|
|
|
output_path = f"{self.asset_dir}/mesh_{self.id}_edge.ply" |
|
|
|
|
o3d.io.write_triangle_mesh(output_path, self.mesh) |
|
|
|
|
print(f"Processing completed. Results saved to {output_path}") |
|
|
|
|
else: |
|
|
|
|
list_id = [] |
|
|
|
|
# sorted_verts = self.sort_vertices(self.mesh.vertices) |
|
|
|
|
sorted_verts =sorted( |
|
|
|
|
(tuple(v.tolist()) for v in vertices), |
|
|
|
|
key=lambda v: (v[0], v[1], v[2]) |
|
|
|
|
) |
|
|
|
|
dict_s_o = {} |
|
|
|
|
dict_o_s = {} |
|
|
|
|
for sorted_idx, sorted_v in enumerate(sorted_verts): |
|
|
|
|
original_idx = vertex_index_map[sorted_v] |
|
|
|
|
dict_s_o[sorted_idx] = original_idx |
|
|
|
|
dict_o_s[original_idx] = sorted_idx |
|
|
|
|
|
|
|
|
|
for vertex_id, coord in enumerate(self.mesh.vertices): |
|
|
|
|
# print(vertex_id, coord) |
|
|
|
|
if vertex_visibility[vertex_id]: |
|
|
|
|
if dict_o_s[vertex_id] in face_points: |
|
|
|
|
list_id.append(dict_o_s[vertex_id]) |
|
|
|
|
vertex_colors[vertex_id] = [1.0, 0.0, 0.0] |
|
|
|
|
|
|
|
|
|
# 保存最终模型 |
|
|
|
|
output_path = f"{self.asset_dir}/mesh_{self.id}_edge.ply" |
|
|
|
|
o3d.io.write_triangle_mesh(output_path, self.mesh) |
|
|
|
|
print(f"Processing completed. Results saved to {output_path}") |
|
|
|
|
#""" |
|
|
|
|
|
|
|
|
|
""" |
|
|
|
|
# 创建顶点可见性映射(基于面片可见性) |
|
|
|
|
vertex_visibility = np.zeros(len(self.mesh.vertices), dtype=bool) |
|
|
|
|
# 遍历所有面片,将可见面片的顶点标记为可见 |
|
|
|
|
for face_idx, face in enumerate(triangles): |
|
|
|
|
if expanded_visibility[face_idx]: |
|
|
|
|
vertex_visibility[face[0]] = True |
|
|
|
|
vertex_visibility[face[1]] = True |
|
|
|
|
vertex_visibility[face[2]] = True |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
vertices = np.asarray(self.mesh.vertices) |
|
|
|
|
vertex_index_map = {tuple(v.tolist()): i for i, v in enumerate(vertices)} |
|
|
|
|
|
|
|
|
|
vertex_colors = np.asarray(self.mesh.vertex_colors) |
|
|
|
|
if face_points==None: |
|
|
|
|
for vertex_id, coord in enumerate(self.mesh.vertices): |
|
|
|
|
if vertex_visibility[vertex_id]: |
|
|
|
|
vertex_colors[vertex_id] = [1.0, 0.0, 0.0] |
|
|
|
|
|
|
|
|
|
# 保存最终模型 |
|
|
|
|
output_path = f"{self.asset_dir}/mesh_{self.id}_expanded.ply" |
|
|
|
|
o3d.io.write_triangle_mesh(output_path, self.mesh) |
|
|
|
|
print(f"Processing completed. Results saved to {output_path}") |
|
|
|
|
else: |
|
|
|
|
list_id = [] |
|
|
|
|
# sorted_verts = self.sort_vertices(self.mesh.vertices) |
|
|
|
|
sorted_verts =sorted( |
|
|
|
|
(tuple(v.tolist()) for v in vertices), |
|
|
|
|
key=lambda v: (v[0], v[1], v[2]) |
|
|
|
|
) |
|
|
|
|
dict_s_o = {} |
|
|
|
|
dict_o_s = {} |
|
|
|
|
for sorted_idx, sorted_v in enumerate(sorted_verts): |
|
|
|
|
original_idx = vertex_index_map[sorted_v] |
|
|
|
|
dict_s_o[sorted_idx] = original_idx |
|
|
|
|
dict_o_s[original_idx] = sorted_idx |
|
|
|
|
|
|
|
|
|
for vertex_id, coord in enumerate(self.mesh.vertices): |
|
|
|
|
# print(vertex_id, coord) |
|
|
|
|
if vertex_visibility[vertex_id]: |
|
|
|
|
if dict_o_s[vertex_id] in face_points: |
|
|
|
|
list_id.append(dict_o_s[vertex_id]) |
|
|
|
|
vertex_colors[vertex_id] = [1.0, 0.0, 0.0] |
|
|
|
|
|
|
|
|
|
# 保存最终模型 |
|
|
|
|
output_path = f"{self.asset_dir}/mesh_{self.id}_expanded.ply" |
|
|
|
|
o3d.io.write_triangle_mesh(output_path, self.mesh) |
|
|
|
|
print(f"Processing completed. Results saved to {output_path}") |
|
|
|
|
#""" |
|
|
|
|
|
|
|
|
|
""" |
|
|
|
|
# 创建顶点可见性映射(基于面片可见性) |
|
|
|
|
vertex_visibility = np.zeros(len(self.mesh.vertices), dtype=bool) |
|
|
|
|
# 遍历所有面片,将可见面片的顶点标记为可见 |
|
|
|
|
for face_idx, face in enumerate(triangles): |
|
|
|
|
if shrunk_visibility2[face_idx]: |
|
|
|
|
vertex_visibility[face[0]] = True |
|
|
|
|
vertex_visibility[face[1]] = True |
|
|
|
|
vertex_visibility[face[2]] = True |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
vertices = np.asarray(self.mesh.vertices) |
|
|
|
|
vertex_index_map = {tuple(v.tolist()): i for i, v in enumerate(vertices)} |
|
|
|
|
|
|
|
|
|
vertex_colors = np.asarray(self.mesh.vertex_colors) |
|
|
|
|
if face_points==None: |
|
|
|
|
for vertex_id, coord in enumerate(self.mesh.vertices): |
|
|
|
|
if vertex_visibility[vertex_id]: |
|
|
|
|
vertex_colors[vertex_id] = [1.0, 0.0, 0.0] |
|
|
|
|
|
|
|
|
|
# 保存最终模型 |
|
|
|
|
output_path = f"{self.asset_dir}/mesh_{self.id}_shrunk.ply" |
|
|
|
|
o3d.io.write_triangle_mesh(output_path, self.mesh) |
|
|
|
|
print(f"Processing completed. Results saved to {output_path}") |
|
|
|
|
else: |
|
|
|
|
list_id = [] |
|
|
|
|
# sorted_verts = self.sort_vertices(self.mesh.vertices) |
|
|
|
|
sorted_verts =sorted( |
|
|
|
|
(tuple(v.tolist()) for v in vertices), |
|
|
|
|
key=lambda v: (v[0], v[1], v[2]) |
|
|
|
|
) |
|
|
|
|
dict_s_o = {} |
|
|
|
|
dict_o_s = {} |
|
|
|
|
for sorted_idx, sorted_v in enumerate(sorted_verts): |
|
|
|
|
original_idx = vertex_index_map[sorted_v] |
|
|
|
|
dict_s_o[sorted_idx] = original_idx |
|
|
|
|
dict_o_s[original_idx] = sorted_idx |
|
|
|
|
|
|
|
|
|
for vertex_id, coord in enumerate(self.mesh.vertices): |
|
|
|
|
# print(vertex_id, coord) |
|
|
|
|
if vertex_visibility[vertex_id]: |
|
|
|
|
if dict_o_s[vertex_id] in face_points: |
|
|
|
|
list_id.append(dict_o_s[vertex_id]) |
|
|
|
|
vertex_colors[vertex_id] = [1.0, 0.0, 0.0] |
|
|
|
|
|
|
|
|
|
# 保存最终模型 |
|
|
|
|
output_path = f"{self.asset_dir}/mesh_{self.id}_shrunk.ply" |
|
|
|
|
o3d.io.write_triangle_mesh(output_path, self.mesh) |
|
|
|
|
print(f"Processing completed. Results saved to {output_path}") |
|
|
|
|
#""" |
|
|
|
|
|
|
|
|
|
return shrunk_visibility, expanded_edge, delete_edge |
|
|
|
|
|
|
|
|
|
@@ -976,28 +1139,14 @@ class ModelProcessor:
        images = read_images_text(os.path.join(self.pose_path, "images.txt"))

        camera_data = {}
        """
        for img in images.values():
            if self.mask_image == img.name[:-4]:
                camera = cameras[img.camera_id]
                camera_data = {
                    "qvec": img.qvec,
                    "tvec": img.tvec,
                    "fx": camera.params[0],
                    "fy": camera.params[1],
                    "cx": camera.params[2],
                    "cy": camera.params[3],
                    "width": camera.width,
                    "height": camera.height,
                    "name": img.name[:-4]
                }

        return self._flag_model(camera_data, None)
        """
        countour_faces_dict = {}
        visible_faces_dict = {}
        edge_faces_dict = {}
        delete_edge_faces_dict = {}

        total_start = time.time()

        n = 0
        for img in images.values():
            camera = cameras[img.camera_id]
@@ -1012,22 +1161,29 @@ class ModelProcessor:
                "height": camera.height,
                "name": img.name[:-4]
            }
            img_name = img.name[:-4]
            print("img_name=", img_name, n)
            img_name = img.name[:-4]
            # if (img_name!="73_8" and img_name!="52_8" and img_name!="62_8"):
            # if (img_name!="52_8" and img_name!="62_8"):
            # if (img_name!="52_8"):
            #     continue
            # face_visibility2, face_contour = self._flag_contour(camera_data, None)
            # countour_faces_dict[img.name[:-4]] = np.where(face_contour)[0].tolist()

            start_time = time.time()
            face_visibility, face_edge, face_delete_edge = self._flag_model(camera_data, None)
            visible_faces_dict[img.name[:-4]] = np.where(face_visibility)[0].tolist()
            processing_time = time.time() - start_time

            visible_faces = np.where(face_visibility)[0].tolist()
            visible_faces_dict[img.name[:-4]] = visible_faces
            edge_faces_dict[img.name[:-4]] = np.where(face_edge)[0].tolist()
            delete_edge_faces_dict[img.name[:-4]] = np.where(face_delete_edge)[0].tolist()
            n += 1

            print(f"Image={img_name}, time={processing_time:.2f}s, visible faces={len(visible_faces)}")

        total_time = time.time() - total_start
        print(f"All images processed, total time: {total_time:.2f}s")
        print(f"Average time per image: {total_time/len(images):.2f}s")

        return {"result1": visible_faces_dict, "result2": edge_faces_dict, "result3": delete_edge_faces_dict}
        # return {"result1": visible_faces_dict, "result2": countour_faces_dict}

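    # process() wraps the occlusion pass in a try/except and selects between
    # the CPU _mask_occlusion() and the GPU _mask_occlusion_gpu() entry points.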
    def process(self):

@@ -1037,8 +1193,8 @@ class ModelProcessor:
        try:
            # Run the physical cameras to generate the occlusion decision
            # self._mask_face_occlusion()
            return self._mask_occlusion()
            # return self._mask_occlusion()
            return self._mask_occlusion_gpu()

        except Exception as e:
            print(f"Error during processing: {str(e)}")