
Handle the coloring issue

Branch: master
Author: hesuicong, 3 months ago
Commit: 74b5bbd255
Changed files (7):
  1. libs/MVS/DepthMap.cpp (2)
  2. libs/MVS/Scene.h (18)
  3. libs/MVS/SceneTexture.cpp (1894)
  4. libs/MVS/__pycache__/display_demo.cpython-310.pyc (BIN)
  5. libs/MVS/__pycache__/mask_face_occlusion.cpython-310.pyc (BIN)
  6. libs/MVS/display_demo.py (77)
  7. libs/MVS/mask_face_occlusion.py (483)

libs/MVS/DepthMap.cpp (2)

@@ -301,7 +301,7 @@ bool DepthEstimator::ImportIgnoreMask(const Image& image0, const Image8U::Size&
 {
 	ASSERT(image0.IsValid() && !image0.image.empty());
 	const String maskFileName(image0.maskName.empty() ? Util::getFileFullName(image0.name)+".mask.png" : image0.maskName);
-	std::cout << "maskFileName: " << maskFileName << std::endl;
+	// std::cout << "maskFileName: " << maskFileName << std::endl;
 	Image16U mask;
 	if (!mask.Load(maskFileName)) {
 		DEBUG("warning: can not load the segmentation mask '%s'", maskFileName.c_str());

libs/MVS/Scene.h (18)

@@ -63,6 +63,8 @@ public:
 	unsigned nMaxThreads; // maximum number of threads used to distribute the work load
 	std::map<std::string, std::unordered_set<int>> visible_faces_map;
+	std::map<std::string, std::unordered_set<int>> edge_faces_map;
+	std::map<std::string, std::unordered_set<int>> delete_edge_faces_map;
 	std::unordered_set<int> face_visible_relative;

 public:
@@ -152,6 +154,17 @@ public:
 		unsigned nMaxFaceArea, unsigned nScales, float fScaleStep, unsigned nAlternatePair, float fRegularityWeight, float fRatioRigidityElasticity, float fGradientStep);
 	#endif
+	void SaveVisibleFacesData(std::map<std::string, std::unordered_set<int>>& visible_faces_map,
+		std::unordered_set<int>& face_visible_relative,
+		std::map<std::string, std::unordered_set<int>>& edge_faces_map,
+		std::map<std::string, std::unordered_set<int>>& delete_edge_faces_map,
+		std::string& basePath);
+	bool LoadVisibleFacesData(std::map<std::string, std::unordered_set<int>>& visible_faces_map,
+		std::unordered_set<int>& face_visible_relative,
+		std::map<std::string, std::unordered_set<int>>& edge_faces_map,
+		std::map<std::string, std::unordered_set<int>>& delete_edge_faces_map,
+		std::string& basePath);
 	// Mesh texturing
 	bool TextureMesh(unsigned nResolutionLevel, unsigned nMinResolution, unsigned minCommonCameras=0, float fOutlierThreshold=0.f, float fRatioDataSmoothness=0.3f,
 		bool bGlobalSeamLeveling=true, bool bLocalSeamLeveling=true, unsigned nTextureSizeMultiple=0, unsigned nRectPackingHeuristic=3, Pixel8U colEmpty=Pixel8U(255,127,39),
@@ -159,8 +172,13 @@ public:
 	bool is_face_visible(const std::string& image_name, int face_index);
 	bool is_face_visible_relative(int face_index);
+	bool is_face_edge(const std::string& image_name, int face_index);
+	bool is_face_delete_edge(const std::string& image_name, int face_index);
 	void SegmentMeshBasedOnCurvature(Mesh::FaceIdxArr& regionMap, float curvatureThreshold);
+	void ShowMesh();

 #ifdef _USE_BOOST
 	// implement BOOST serialization
 	template <class Archive>
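The new SaveVisibleFacesData/LoadVisibleFacesData declarations persist the per-image face-index maps that the Python occlusion script below produces; their definitions live in SceneTexture.cpp, whose diff is suppressed. As a rough sketch of the data being exchanged, assuming a simple JSON layout (hypothetical file name and format, not necessarily what the committed code writes):

import json

def save_visible_faces_data(visible_faces_map, face_visible_relative,
                            edge_faces_map, delete_edge_faces_map, base_path):
    # Each map is image name -> set of face indices; sets become sorted lists for JSON.
    data = {
        "visible_faces_map": {k: sorted(v) for k, v in visible_faces_map.items()},
        "edge_faces_map": {k: sorted(v) for k, v in edge_faces_map.items()},
        "delete_edge_faces_map": {k: sorted(v) for k, v in delete_edge_faces_map.items()},
        "face_visible_relative": sorted(face_visible_relative),
    }
    with open(f"{base_path}/visible_faces.json", "w") as f:
        json.dump(data, f)

def load_visible_faces_data(base_path):
    # Inverse of the sketch above: restore the sets from the JSON lists.
    with open(f"{base_path}/visible_faces.json") as f:
        data = json.load(f)
    return ({k: set(v) for k, v in data["visible_faces_map"].items()},
            set(data["face_visible_relative"]),
            {k: set(v) for k, v in data["edge_faces_map"].items()},
            {k: set(v) for k, v in data["delete_edge_faces_map"].items()})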

libs/MVS/SceneTexture.cpp (1894)

File diff suppressed because it is too large.

libs/MVS/__pycache__/display_demo.cpython-310.pyc (BIN)

Binary file not shown.

libs/MVS/__pycache__/mask_face_occlusion.cpython-310.pyc (BIN)

Binary file not shown.

libs/MVS/display_demo.py (77, new file)

@@ -0,0 +1,77 @@
import open3d as o3d
import os
import argparse
import numpy as np
from PIL import Image


class DisplayProcessor:
    def __init__(self):
        # argv = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else []
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--id",
            required=True,
        )
        parser.add_argument(
            "--mask",
            type=int,
            default=0
        )
        args = parser.parse_args()
        self.id = args.id
        mask = args.mask
        out_dir = f"out.{self.id}.nomask"
        if mask == 1:
            out_dir = f"out.{self.id}"
        self.mesh = None
        self.obj_path = f"/home/algo/Documents/openMVS/data/{self.id}/{out_dir}/mesh.obj"

    def load_and_show(self):
        # Load the mesh
        mesh = None
        try:
            mesh = o3d.io.read_triangle_mesh(self.obj_path, enable_post_processing=True)
            if not mesh.has_vertices():
                print(f"Warning: mesh has no valid vertices - {self.obj_path}")
        except Exception as e:
            print(f"Failed to load model: {self.obj_path} - {e}")
        if not mesh:
            print("No model was loaded; please check the errors above")
        else:
            # Visualize the model
            print("Showing model... (press 'Q' to quit)")
            try:
                from packaging import version
                o3d_version = version.parse(o3d.__version__)
                # draw_geometries arguments for newer versions
                if o3d_version >= version.parse("0.13.0"):
                    o3d.visualization.draw_geometries(
                        [mesh],
                        window_name="Model viewer",
                        mesh_show_back_face=True,
                        mesh_show_wireframe=False
                    )
                # draw_geometries arguments for older versions
                else:
                    o3d.visualization.draw_geometries(
                        [mesh],
                        window_name="Model viewer",
                        point_show_normal=False,
                        mesh_show_back_face=True
                    )
            except Exception as e:
                print(f"Visualization with draw_geometries failed: {e}")


# Entry point
if __name__ == "__main__":
    DisplayProcessor().load_and_show()
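Given the hard-coded data root above, a typical invocation (the id value here is only a placeholder) would be:

    python display_demo.py --id 001 --mask 1

which opens /home/algo/Documents/openMVS/data/001/out.001/mesh.obj in the Open3D viewer; with --mask 0 (the default) it looks for the out.001.nomask variant instead.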

libs/MVS/mask_face_occlusion.py (483)

@@ -105,7 +105,7 @@ class ModelProcessor:
                 if i != j and j not in self.face_adjacency[i]:
                     self.face_adjacency[i].append(j)

-    def _expand_face_visibility(self, face_visibility):
+    def _expand_face_visibility(self, face_visibility, shrink_radius=1):
         if self.face_adjacency is None:
             return face_visibility.copy()

@@ -120,7 +120,7 @@
             queue.append((face_idx, 0))  # (face index, current expansion depth)
             visited.add(face_idx)

-        self.expand_radius = 10
+        self.expand_radius = shrink_radius
         # breadth-first expansion
         while queue:
             current_idx, current_radius = queue.popleft()
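The change above replaces the previously hard-coded expansion radius of 10 with the shrink_radius parameter. Only fragments of the method appear in this hunk; for reference, a minimal self-contained sketch of this kind of breadth-first ring expansion over a face-adjacency list (names are illustrative, not the committed code; face_adjacency maps each face index to a list of neighboring face indices):

from collections import deque
import numpy as np

def expand_face_visibility(face_visibility, face_adjacency, radius):
    # Start from every visible face and mark neighbors up to `radius` rings away.
    expanded = face_visibility.copy()
    seeds = np.flatnonzero(face_visibility)
    queue = deque((int(i), 0) for i in seeds)
    visited = set(int(i) for i in seeds)
    while queue:
        idx, ring = queue.popleft()
        if ring >= radius:
            continue
        for nbr in face_adjacency[idx]:
            if nbr not in visited:
                visited.add(nbr)
                expanded[nbr] = True
                queue.append((nbr, ring + 1))
    return expanded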
@@ -214,6 +214,7 @@
         # multi-level occlusion test
         visible_mask, occlusion_mask = self._hierarchical_occlusion_test(
+        # visible_mask, occlusion_mask, vertex_depth_difference = self._hierarchical_occlusion_test2(
             vertices_cam[frustum_mask],
             depth_pyramid,
             (fx, fy, cx, cy),

@@ -226,6 +227,10 @@
         final_occlusion = np.zeros(len(vertices), dtype=bool)
         final_occlusion[frustum_mask] = occlusion_mask

+        # final_vertex_difference = np.zeros(len(vertices), dtype=bool)
+        # final_vertex_difference[frustum_mask] = vertex_depth_difference
+        # return final_visible.tolist(), self._occlusion_expansion(final_occlusion, vertices), final_vertex_difference.tolist()
         return final_visible.tolist(), self._occlusion_expansion(final_occlusion, vertices)

     def _build_depth_pyramid2(self, depth_map, levels=4):
@@ -320,6 +325,197 @@
         return final_visible, final_occlusion

+    def _hierarchical_occlusion_test2(self, vertices_cam, depth_pyramid, intrinsics, img_size):
+        """Hierarchical occlusion test (safe version)."""
+        fx, fy, cx, cy = intrinsics
+        height, width = img_size
+
+        # 1. Filter out invalid vertices
+        valid_mask = vertices_cam[:, 2] > 1e-6
+        vertices_valid = vertices_cam[valid_mask]
+
+        if len(vertices_valid) == 0:
+            return (np.zeros(len(vertices_cam), dtype=bool),
+                    np.zeros(len(vertices_cam), dtype=bool),
+                    np.zeros(len(vertices_cam)))  # return an empty depth-difference array
+
+        visible = np.zeros(len(vertices_valid), dtype=bool)
+        occlusion = np.zeros(len(vertices_valid), dtype=bool)
+
+        # Per-pixel depth range (minimum and maximum)
+        pixel_depth_min = {}
+        pixel_depth_max = {}
+
+        # 2. Level-by-level test
+        for level in reversed(range(len(depth_pyramid))):
+            scale = 2 ** level
+            current_depth = depth_pyramid[level]
+            h, w = current_depth.shape
+
+            # Build the intrinsics matrix defensively
+            K = np.array([
+                [max(fx/(scale + 1e-6), 1e-6), 0, (cx - 0.5)/scale + 0.5],
+                [0, max(fy/(scale + 1e-6), 1e-6), (cy - 0.5)/scale + 0.5],
+                [0, 0, 1]
+            ], dtype=np.float32)
+
+            # Project to the image plane
+            uv_homo = (K @ vertices_valid.T).T
+            uv = uv_homo[:, :2] / uv_homo[:, 2:3]
+
+            # Clamp to the image bounds
+            u = np.clip(uv[:, 0], 0.0, float(w-1))
+            v = np.clip(uv[:, 1], 0.0, float(h-1))
+
+            # Convert to integer indices
+            u_idx = np.clip(np.floor(u).astype(np.int32), 0, w-1)
+            v_idx = np.clip(np.floor(v).astype(np.int32), 0, h-1)
+
+            # Sample depth values
+            depth_vals = current_depth[v_idx, u_idx]
+
+            # Record the per-pixel depth range only at the finest level (level == 0)
+            if level == 0:
+                for i in range(len(u_idx)):
+                    pixel_key = (u_idx[i], v_idx[i])
+                    vertex_depth = vertices_valid[i, 2]
+                    # Update the pixel's minimum depth
+                    if pixel_key not in pixel_depth_min or vertex_depth < pixel_depth_min[pixel_key]:
+                        pixel_depth_min[pixel_key] = vertex_depth
+                    # Update the pixel's maximum depth
+                    if pixel_key not in pixel_depth_max or vertex_depth > pixel_depth_max[pixel_key]:
+                        pixel_depth_max[pixel_key] = vertex_depth
+
+            # Depth comparison
+            level_tol = 0.0008 * (2 ** level)  # 0.005 0.0008
+            visible |= (vertices_valid[:, 2] <= (depth_vals + level_tol))
+            occlusion |= (vertices_valid[:, 2] > (depth_vals + level_tol))
+
+        # Per-pixel depth difference (max depth - min depth)
+        pixel_depth_difference = {}
+        for pixel_key in pixel_depth_min:
+            if pixel_key in pixel_depth_max:
+                pixel_depth_difference[pixel_key] = pixel_depth_max[pixel_key] - pixel_depth_min[pixel_key]
+
+        # Assign each vertex the depth difference of the pixel it projects to
+        vertex_depth_difference = np.zeros(len(vertices_cam))
+        if level == 0:  # make sure the depth range was recorded
+            for i in range(len(vertices_valid)):
+                pixel_key = (u_idx[i], v_idx[i])
+                if pixel_key in pixel_depth_difference:
+                    # Recover the original vertex index
+                    orig_idx = np.where(valid_mask)[0][i]
+                    vertex_depth_difference[orig_idx] = pixel_depth_difference[pixel_key]
+
+        # 3. Map the results back to all vertices
+        final_visible = np.zeros(len(vertices_cam), dtype=bool)
+        final_visible[valid_mask] = visible
+        final_occlusion = np.zeros(len(vertices_cam), dtype=bool)
+        final_occlusion[valid_mask] = occlusion
+
+        return final_visible, final_occlusion, vertex_depth_difference
+
+    def _hierarchical_occlusion_test3(self, vertices_cam, depth_pyramid, intrinsics, img_size):
+        """Hierarchical occlusion test (safe version)."""
+        fx, fy, cx, cy = intrinsics
+        height, width = img_size
+
+        # 1. Filter out invalid vertices
+        valid_mask = vertices_cam[:, 2] > 1e-6
+        vertices_valid = vertices_cam[valid_mask]
+
+        if len(vertices_valid) == 0:
+            return (np.zeros(len(vertices_cam), dtype=bool),
+                    np.zeros(len(vertices_cam), dtype=bool),
+                    {})  # return an empty depth-difference dict
+
+        visible = np.zeros(len(vertices_valid), dtype=bool)
+        occlusion = np.zeros(len(vertices_valid), dtype=bool)
+
+        # Per-pixel depth range
+        pixel_depth_range = {}
+        # Pixel coordinate (and depth difference) recorded per vertex
+        vertex_pixel_info = {}
+
+        # 2. Level-by-level test
+        for level in reversed(range(len(depth_pyramid))):
+            scale = 2 ** level
+            current_depth = depth_pyramid[level]
+            h, w = current_depth.shape
+
+            # Build the intrinsics matrix defensively
+            K = np.array([
+                [max(fx/(scale + 1e-6), 1e-6), 0, (cx - 0.5)/scale + 0.5],
+                [0, max(fy/(scale + 1e-6), 1e-6), (cy - 0.5)/scale + 0.5],
+                [0, 0, 1]
+            ], dtype=np.float32)
+
+            # Project to the image plane
+            uv_homo = (K @ vertices_valid.T).T
+            uv = uv_homo[:, :2] / uv_homo[:, 2:3]
+
+            # Clamp to the image bounds
+            u = np.clip(uv[:, 0], 0.0, float(w-1))
+            v = np.clip(uv[:, 1], 0.0, float(h-1))
+
+            # Convert to integer indices
+            u_idx = np.clip(np.floor(u).astype(np.int32), 0, w-1)
+            v_idx = np.clip(np.floor(v).astype(np.int32), 0, h-1)
+
+            # Sample depth values
+            depth_vals = current_depth[v_idx, u_idx]
+
+            # Record the per-pixel depth range (only at the finest level)
+            # if level == 0:  # only record at the original resolution
+            if True:
+                for i in range(len(u_idx)):
+                    vertex_idx = np.where(valid_mask)[0][i]  # original vertex index
+                    pixel_key = (u_idx[i], v_idx[i])
+
+                    # Pixel coordinate of this vertex
+                    vertex_pixel_info[vertex_idx] = pixel_key
+
+                    # Depth range at this pixel
+                    if pixel_key not in pixel_depth_range:
+                        pixel_depth_range[pixel_key] = {
+                            'min': vertices_valid[i, 2],  # vertex depth
+                            'max': vertices_valid[i, 2],  # vertex depth
+                            'count': 1
+                        }
+                    else:
+                        pixel_depth_range[pixel_key]['min'] = min(
+                            pixel_depth_range[pixel_key]['min'], vertices_valid[i, 2])
+                        pixel_depth_range[pixel_key]['max'] = max(
+                            pixel_depth_range[pixel_key]['max'], vertices_valid[i, 2])
+                        pixel_depth_range[pixel_key]['count'] += 1
+
+            # Depth comparison
+            level_tol = 0.0008 * (2 ** level)  # 0.005 0.0008
+            visible |= (vertices_valid[:, 2] <= (depth_vals + level_tol))
+            occlusion |= (vertices_valid[:, 2] > (depth_vals + level_tol))
+
+        # Depth difference per pixel
+        pixel_depth_difference = {}
+        for pixel_key, depth_range in pixel_depth_range.items():
+            pixel_depth_difference[pixel_key] = depth_range['max'] - depth_range['min']
+
+        # Assign each vertex the depth difference of the pixel it projects to
+        vertex_depth_difference = np.zeros(len(vertices_cam))
+        for vertex_idx, pixel_key in vertex_pixel_info.items():
+            if pixel_key in pixel_depth_difference:
+                vertex_depth_difference[vertex_idx] = pixel_depth_difference[pixel_key]
+
+        # 3. Map the results back to all vertices
+        final_visible = np.zeros(len(vertices_cam), dtype=bool)
+        final_visible[valid_mask] = visible
+        final_occlusion = np.zeros(len(vertices_cam), dtype=bool)
+        final_occlusion[valid_mask] = occlusion
+
+        return final_visible, final_occlusion, vertex_depth_difference
+
     def _occlusion_expansion(self, occlusion_mask, vertices, radius=0.0008):
         """Fast occlusion expansion based on spatial hashing"""
         from collections import defaultdict
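The per-pixel depth-range bookkeeping added in _hierarchical_occlusion_test2/3 above walks a Python dict per projected vertex; for reference, the same min/max accumulation can be written with numpy's unbuffered ufunc.at (a sketch over the same inputs, not the committed code):

import numpy as np

def per_pixel_depth_range(u_idx, v_idx, depths, h, w):
    # Accumulate the min/max vertex depth per pixel without an explicit Python loop.
    min_depth = np.full((h, w), np.inf)
    max_depth = np.full((h, w), -np.inf)
    np.minimum.at(min_depth, (v_idx, u_idx), depths)
    np.maximum.at(max_depth, (v_idx, u_idx), depths)
    diff = np.where(np.isfinite(min_depth), max_depth - min_depth, 0.0)
    # Depth spread at the pixel each vertex projects to, per vertex.
    return diff[v_idx, u_idx]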
@@ -393,6 +589,7 @@
         R = self.qvec2rotmat(camera_data['qvec']).T
         eye = -R @ camera_data['tvec']
         # eye = camera_data['tvec']
+        # final_visible_list, final_occlusion_list, final_vertex_difference_list = self._compute_vertex_in_frustum(
         final_visible_list, final_occlusion_list = self._compute_vertex_in_frustum(
             camera_data['fx'], camera_data['fy'],
             camera_data['cx'], camera_data['cy'],
@@ -443,11 +640,12 @@
         output_path = f"{self.asset_dir}/mesh_{self.id}_脸部遮挡判断.ply"
         o3d.io.write_triangle_mesh(output_path, self.mesh)
         print(f"Processing completed. Results saved to {output_path}")
-        """
+        #"""
         # triangle face array
         triangles = np.asarray(self.mesh.triangles)
         face_visible_bitmap = np.zeros(len(triangles), dtype=bool)
+        # face_edge_bitmap = np.zeros(len(triangles), dtype=bool)

         # iterate over all faces
         for face_idx, face in enumerate(triangles):
@@ -459,11 +657,266 @@
                 final_visible_list[v2]
             ])

+            # threshold = 0.5
+            # face_edge_bitmap[face_idx] = all([  # any all
+            #     final_vertex_difference_list[v0] < threshold,
+            #     final_vertex_difference_list[v1] < threshold,
+            #     final_vertex_difference_list[v2] < threshold
+            # ])
+
         # return face_visible_bitmap
+        # expanded_visibility = self._expand_face_visibility(face_visible_bitmap)
+        # return expanded_visibility
         shrunk_visibility = self._shrink_face_visibility(face_visible_bitmap, 6)  # 6 10
-        return shrunk_visibility
+        # 16,13;13,16;16,16
+        expanded_visibility = self._expand_face_visibility(face_visible_bitmap, 30)
+        shrunk_visibility2 = self._shrink_face_visibility(face_visible_bitmap, 50)
+        expanded_edge = expanded_visibility & ~shrunk_visibility2
+        delete_edge = face_visible_bitmap & ~shrunk_visibility
+
+        """
+        # Build a vertex visibility map (based on face visibility)
+        vertex_visibility = np.zeros(len(self.mesh.vertices), dtype=bool)
+
+        # Iterate over all faces and mark the vertices of flagged faces as visible
+        for face_idx, face in enumerate(triangles):
+            # if expanded_edge[face_idx] and face_edge_bitmap[face_idx]:
+            if delete_edge[face_idx]:
+                vertex_visibility[face[0]] = True
+                vertex_visibility[face[1]] = True
+                vertex_visibility[face[2]] = True
+
+        vertices = np.asarray(self.mesh.vertices)
+        vertex_index_map = {tuple(v.tolist()): i for i, v in enumerate(vertices)}
+        vertex_colors = np.asarray(self.mesh.vertex_colors)
+        if face_points==None:
+            for vertex_id, coord in enumerate(self.mesh.vertices):
+                if vertex_visibility[vertex_id]:
+                    vertex_colors[vertex_id] = [1.0, 0.0, 0.0]
+
+            # Save the final model
+            output_path = f"{self.asset_dir}/mesh_{self.id}_edge.ply"
+            o3d.io.write_triangle_mesh(output_path, self.mesh)
+            print(f"Processing completed. Results saved to {output_path}")
+        else:
+            list_id = []
+            # sorted_verts = self.sort_vertices(self.mesh.vertices)
+            sorted_verts = sorted(
+                (tuple(v.tolist()) for v in vertices),
+                key=lambda v: (v[0], v[1], v[2])
+            )
+            dict_s_o = {}
+            dict_o_s = {}
+            for sorted_idx, sorted_v in enumerate(sorted_verts):
+                original_idx = vertex_index_map[sorted_v]
+                dict_s_o[sorted_idx] = original_idx
+                dict_o_s[original_idx] = sorted_idx
+
+            for vertex_id, coord in enumerate(self.mesh.vertices):
+                # print(vertex_id, coord)
+                if vertex_visibility[vertex_id]:
+                    if dict_o_s[vertex_id] in face_points:
+                        list_id.append(dict_o_s[vertex_id])
+                        vertex_colors[vertex_id] = [1.0, 0.0, 0.0]
+
+            # Save the final model
+            output_path = f"{self.asset_dir}/mesh_{self.id}_edge.ply"
+            o3d.io.write_triangle_mesh(output_path, self.mesh)
+            print(f"Processing completed. Results saved to {output_path}")
+        #"""
+        """
+        # Build a vertex visibility map (based on face visibility)
+        vertex_visibility = np.zeros(len(self.mesh.vertices), dtype=bool)
+
+        # Iterate over all faces and mark the vertices of visible faces as visible
+        for face_idx, face in enumerate(triangles):
+            if expanded_visibility[face_idx]:
+                vertex_visibility[face[0]] = True
+                vertex_visibility[face[1]] = True
+                vertex_visibility[face[2]] = True
+
+        vertices = np.asarray(self.mesh.vertices)
+        vertex_index_map = {tuple(v.tolist()): i for i, v in enumerate(vertices)}
+        vertex_colors = np.asarray(self.mesh.vertex_colors)
+        if face_points==None:
+            for vertex_id, coord in enumerate(self.mesh.vertices):
+                if vertex_visibility[vertex_id]:
+                    vertex_colors[vertex_id] = [1.0, 0.0, 0.0]
+
+            # Save the final model
+            output_path = f"{self.asset_dir}/mesh_{self.id}_expanded.ply"
+            o3d.io.write_triangle_mesh(output_path, self.mesh)
+            print(f"Processing completed. Results saved to {output_path}")
+        else:
+            list_id = []
+            # sorted_verts = self.sort_vertices(self.mesh.vertices)
+            sorted_verts = sorted(
+                (tuple(v.tolist()) for v in vertices),
+                key=lambda v: (v[0], v[1], v[2])
+            )
+            dict_s_o = {}
+            dict_o_s = {}
+            for sorted_idx, sorted_v in enumerate(sorted_verts):
+                original_idx = vertex_index_map[sorted_v]
+                dict_s_o[sorted_idx] = original_idx
+                dict_o_s[original_idx] = sorted_idx
+
+            for vertex_id, coord in enumerate(self.mesh.vertices):
+                # print(vertex_id, coord)
+                if vertex_visibility[vertex_id]:
+                    if dict_o_s[vertex_id] in face_points:
+                        list_id.append(dict_o_s[vertex_id])
+                        vertex_colors[vertex_id] = [1.0, 0.0, 0.0]
+
+            # Save the final model
+            output_path = f"{self.asset_dir}/mesh_{self.id}_expanded.ply"
+            o3d.io.write_triangle_mesh(output_path, self.mesh)
+            print(f"Processing completed. Results saved to {output_path}")
+        #"""
+        """
+        # Build a vertex visibility map (based on face visibility)
+        vertex_visibility = np.zeros(len(self.mesh.vertices), dtype=bool)
+
+        # Iterate over all faces and mark the vertices of visible faces as visible
+        for face_idx, face in enumerate(triangles):
+            if shrunk_visibility2[face_idx]:
+                vertex_visibility[face[0]] = True
+                vertex_visibility[face[1]] = True
+                vertex_visibility[face[2]] = True
+
+        vertices = np.asarray(self.mesh.vertices)
+        vertex_index_map = {tuple(v.tolist()): i for i, v in enumerate(vertices)}
+        vertex_colors = np.asarray(self.mesh.vertex_colors)
+        if face_points==None:
+            for vertex_id, coord in enumerate(self.mesh.vertices):
+                if vertex_visibility[vertex_id]:
+                    vertex_colors[vertex_id] = [1.0, 0.0, 0.0]
+
+            # Save the final model
+            output_path = f"{self.asset_dir}/mesh_{self.id}_shrunk.ply"
+            o3d.io.write_triangle_mesh(output_path, self.mesh)
+            print(f"Processing completed. Results saved to {output_path}")
+        else:
+            list_id = []
+            # sorted_verts = self.sort_vertices(self.mesh.vertices)
+            sorted_verts = sorted(
+                (tuple(v.tolist()) for v in vertices),
+                key=lambda v: (v[0], v[1], v[2])
+            )
+            dict_s_o = {}
+            dict_o_s = {}
+            for sorted_idx, sorted_v in enumerate(sorted_verts):
+                original_idx = vertex_index_map[sorted_v]
+                dict_s_o[sorted_idx] = original_idx
+                dict_o_s[original_idx] = sorted_idx
+
+            for vertex_id, coord in enumerate(self.mesh.vertices):
+                # print(vertex_id, coord)
+                if vertex_visibility[vertex_id]:
+                    if dict_o_s[vertex_id] in face_points:
+                        list_id.append(dict_o_s[vertex_id])
+                        vertex_colors[vertex_id] = [1.0, 0.0, 0.0]
+
+            # Save the final model
+            output_path = f"{self.asset_dir}/mesh_{self.id}_shrunk.ply"
+            o3d.io.write_triangle_mesh(output_path, self.mesh)
+            print(f"Processing completed. Results saved to {output_path}")
+        #"""
+        return shrunk_visibility, expanded_edge, delete_edge
+
+    def _flag_contour(self, camera_data, face_points):
+        """Mark visible vertices."""
+        vertex_visible = []
+        vertex_occlusion = []
+        depth_images = []
+
+        render = o3d.visualization.rendering.OffscreenRenderer(camera_data['width'], camera_data['height'])
+        material = o3d.visualization.rendering.MaterialRecord()
+        render.scene.add_geometry("mesh", self.mesh, material)
+
+        # Generate the depth image
+        depth_image = self._gen_depth_image(camera_data, render)
+
+        # Camera parameters
+        fx = camera_data['fx']
+        fy = camera_data['fy']
+        cx = camera_data['cx']
+        cy = camera_data['cy']
+        height = camera_data['height']
+        width = camera_data['width']
+
+        # Vertex coordinates in camera space
+        w2c = get_w2c(camera_data['qvec'], camera_data['tvec'])
+        vertices = np.asarray(self.mesh.vertices)
+        vertices_homo = np.hstack([vertices, np.ones((len(vertices), 1))])
+        vertices_cam = (w2c @ vertices_homo.T).T[:, :3]
+
+        # Discard vertices behind the camera
+        valid_mask = vertices_cam[:, 2] > 0
+        vertices_valid = vertices_cam[valid_mask]
+
+        # Project vertices onto the image plane
+        u = (vertices_valid[:, 0] * fx / vertices_valid[:, 2] + cx)
+        v = (vertices_valid[:, 1] * fy / vertices_valid[:, 2] + cy)
+        u_idx = np.clip(np.floor(u).astype(int), 0, width-1)
+        v_idx = np.clip(np.floor(v).astype(int), 0, height-1)
+
+        # Initialize min_depth_map and max_depth_map
+        min_depth_map = np.full((height, width), np.inf)
+        max_depth_map = np.zeros((height, width))
+
+        # Update min_depth_map and max_depth_map
+        for i in range(len(vertices_valid)):
+            x = u_idx[i]
+            y = v_idx[i]
+            d = vertices_valid[i, 2]
+            if d < min_depth_map[y, x]:
+                min_depth_map[y, x] = d
+            if d > max_depth_map[y, x]:
+                max_depth_map[y, x] = d
+
+        # For each vertex, check the depth range
+        edge_vertices = np.zeros(len(vertices), dtype=bool)
+        threshold = 3  # threshold, tune as needed
+        for i in range(len(vertices_valid)):
+            x = u_idx[i]
+            y = v_idx[i]
+            if min_depth_map[y, x] < np.inf:  # make sure there is data
+                depth_range = max_depth_map[y, x] - min_depth_map[y, x]
+                if depth_range > threshold:
+                    # Recover the original vertex index
+                    orig_idx = np.where(valid_mask)[0][i]
+                    edge_vertices[orig_idx] = False
+
+        # Mark edge vertices
+        vertex_colors = np.asarray(self.mesh.vertex_colors)
+        for i in range(len(vertices)):
+            if edge_vertices[i]:
+                vertex_colors[i] = [1.0, 0.0, 0.0]  # red marks an edge
+
+        # Save the model
+        output_path = f"{self.asset_dir}/mesh_{self.id}_edge.ply"
+        o3d.io.write_triangle_mesh(output_path, self.mesh)
+        print(f"Edge detection completed. Results saved to {output_path}")
+
+        # Face-level edge flags
+        triangles = np.asarray(self.mesh.triangles)
+        face_edge = np.zeros(len(triangles), dtype=bool)
+        for face_idx, face in enumerate(triangles):
+            if any(edge_vertices[face]):
+                face_edge[face_idx] = True
+
+        # For compatibility with the existing code, return face visibility and edge flags
+        # Note: face_visible_bitmap is not defined above, but the caller may expect two return values
+        # If real face visibility is needed, keep the original logic; it is simplified here
+        face_visible_bitmap = np.ones(len(triangles), dtype=bool)  # temporary placeholder
+        return face_visible_bitmap, face_edge
         """

     def _mask_face_occlusion(self):
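The new per-face bands in _flag_model come from combining eroded and dilated versions of the raw visibility: expanded_edge = expanded & ~shrunk2 and delete_edge = visible & ~shrunk. A toy 2-D illustration of that band logic using image morphology in place of the mesh-adjacency BFS (purely illustrative; the radii do not correspond to the committed 6/30/50):

import numpy as np
from scipy.ndimage import binary_erosion, binary_dilation

# Toy stand-in for per-face visibility: a filled square on a grid.
visible = np.zeros((9, 9), dtype=bool)
visible[2:7, 2:7] = True

shrunk = binary_erosion(visible, iterations=1)      # plays the role of the radius-6 shrink
expanded = binary_dilation(visible, iterations=2)   # plays the role of the radius-30 expansion
shrunk2 = binary_erosion(visible, iterations=2)     # plays the role of the radius-50 shrink

expanded_edge = expanded & ~shrunk2   # wide band straddling the visibility boundary
delete_edge = visible & ~shrunk       # thin rim just inside the raw visibility
print(expanded_edge.sum(), delete_edge.sum())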
@@ -523,7 +976,11 @@
         return self._flag_model(camera_data, None)
         """
+        countour_faces_dict = {}
         visible_faces_dict = {}
+        edge_faces_dict = {}
+        delete_edge_faces_dict = {}
+        n = 0
         for img in images.values():
             camera = cameras[img.camera_id]
             camera_data = {

@@ -538,14 +995,20 @@
                 "name": img.name[:-4]
             }
             img_name = img.name[:-4]
-            print("img_name=", img_name)
+            print("img_name=", img_name, n)
             # if (img_name!="72_2" and img_name!="82_2" and img_name!="83_2"): # 82_2 72_2
-            # if (img_name!="82_2"):
+            # if (img_name!="74_8"):
             #     continue
-            face_visibility = self._flag_model(camera_data, None)
+            # face_visibility2, face_contour = self._flag_contour(camera_data, None)
+            # countour_faces_dict[img.name[:-4]] = np.where(face_contour)[0].tolist()
+            face_visibility, face_edge, face_delete_edge = self._flag_model(camera_data, None)
             visible_faces_dict[img.name[:-4]] = np.where(face_visibility)[0].tolist()
+            edge_faces_dict[img.name[:-4]] = np.where(face_edge)[0].tolist()
+            delete_edge_faces_dict[img.name[:-4]] = np.where(face_delete_edge)[0].tolist()
+            n += 1
-        return visible_faces_dict
+        return {"result1": visible_faces_dict, "result2": edge_faces_dict, "result3": delete_edge_faces_dict}
+        # return {"result1": visible_faces_dict, "result2": countour_faces_dict}

     def process(self):
