diff --git a/libs/MVS/DepthMap.cpp b/libs/MVS/DepthMap.cpp index 5c26194..78536f6 100644 --- a/libs/MVS/DepthMap.cpp +++ b/libs/MVS/DepthMap.cpp @@ -301,7 +301,7 @@ bool DepthEstimator::ImportIgnoreMask(const Image& image0, const Image8U::Size& { ASSERT(image0.IsValid() && !image0.image.empty()); const String maskFileName(image0.maskName.empty() ? Util::getFileFullName(image0.name)+".mask.png" : image0.maskName); - std::cout << "maskFileName: " << maskFileName << std::endl; + // std::cout << "maskFileName: " << maskFileName << std::endl; Image16U mask; if (!mask.Load(maskFileName)) { DEBUG("warning: can not load the segmentation mask '%s'", maskFileName.c_str()); diff --git a/libs/MVS/Scene.h b/libs/MVS/Scene.h index 701c973..f66a1fe 100644 --- a/libs/MVS/Scene.h +++ b/libs/MVS/Scene.h @@ -63,6 +63,8 @@ public: unsigned nMaxThreads; // maximum number of threads used to distribute the work load std::map> visible_faces_map; + std::map> edge_faces_map; + std::map> delete_edge_faces_map; std::unordered_set face_visible_relative; public: @@ -152,6 +154,17 @@ public: unsigned nMaxFaceArea, unsigned nScales, float fScaleStep, unsigned nAlternatePair, float fRegularityWeight, float fRatioRigidityElasticity, float fGradientStep); #endif + void SaveVisibleFacesData(std::map>& visible_faces_map, + std::unordered_set& face_visible_relative, + std::map>& edge_faces_map, + std::map>& delete_edge_faces_map, + std::string& basePath); + bool LoadVisibleFacesData(std::map>& visible_faces_map, + std::unordered_set& face_visible_relative, + std::map>& edge_faces_map, + std::map>& delete_edge_faces_map, + std::string& basePath); + // Mesh texturing bool TextureMesh(unsigned nResolutionLevel, unsigned nMinResolution, unsigned minCommonCameras=0, float fOutlierThreshold=0.f, float fRatioDataSmoothness=0.3f, bool bGlobalSeamLeveling=true, bool bLocalSeamLeveling=true, unsigned nTextureSizeMultiple=0, unsigned nRectPackingHeuristic=3, Pixel8U colEmpty=Pixel8U(255,127,39), @@ -159,7 +172,12 
@@ public: bool is_face_visible(const std::string& image_name, int face_index); bool is_face_visible_relative(int face_index); + bool is_face_edge(const std::string& image_name, int face_index); + bool is_face_delete_edge(const std::string& image_name, int face_index); + void SegmentMeshBasedOnCurvature(Mesh::FaceIdxArr& regionMap, float curvatureThreshold); + + void ShowMesh(); #ifdef _USE_BOOST // implement BOOST serialization diff --git a/libs/MVS/SceneTexture.cpp b/libs/MVS/SceneTexture.cpp index 3e81526..2953b52 100644 --- a/libs/MVS/SceneTexture.cpp +++ b/libs/MVS/SceneTexture.cpp @@ -68,7 +68,9 @@ using namespace MVS; #define TEXOPT_INFERENCE_LBP 1 #define TEXOPT_INFERENCE_TRWS 2 #define TEXOPT_INFERENCE TEXOPT_INFERENCE_LBP -// #define MASK_FACE_OCCLUSION +#define MASK_FACE_OCCLUSION +// #define DISPLAY_DEMO +#define CACHE_MASK // inference algorithm #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP @@ -385,6 +387,8 @@ struct MeshTexture { mutable FloatArr meshCurvatures; // 存储每个面的曲率值 void ComputeFaceCurvatures() const; + const Image8U3* alternativeTexture; // 备用纹理指针 + // used to sample seam edges typedef TAccumulator AccumColor; typedef Sampler::Linear Sampler; @@ -448,7 +452,8 @@ public: void CreateVirtualFaces3(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const; void CreateVirtualFaces4(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, Mesh::FaceIdxArr& mapFaceToVirtualFace, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f); void CreateVirtualFaces5(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const; - void CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras=2, float 
thMaxNormalDeviation=25.f) const; + bool CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, std::vector& isVirtualFace, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const; + IIndexArr SelectBestViews(const FaceDataArr& faceDatas, FIndex fid, unsigned minCommonCameras, float ratioAngleToQuality) const; IIndexArr SelectBestView(const FaceDataArr& faceDatas, FIndex fid, unsigned minCommonCameras, float ratioAngleToQuality) const; bool FaceViewSelection(unsigned minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, int nIgnoreMaskLabel, const IIndexArr& views); @@ -492,6 +497,78 @@ public: Color ycbcr = MeshTexture::RGB2YCBCR(rgb); return ycbcr[0]; // Y分量就是亮度 } + + // 定义结构体用于封装颜色中值和亮度中值 + struct MedianValues { + Color color; + float quality; + }; + + // 修改函数,返回MedianValues结构体 + static MedianValues ComputeMedianColorAndQuality(const std::vector>& views) { + std::vector colors; + std::vector qualities; // 新增:存储质量值 + + for (const auto& view : views) { + qualities.push_back(view.first); // 收集质量值 + colors.push_back(view.second); // 收集颜色值 + } + + // 对每个颜色通道和质量分别排序 + std::vector r, g, b; + for (const auto& color : colors) { + r.push_back(color[0]); + g.push_back(color[1]); + b.push_back(color[2]); + } + + std::sort(r.begin(), r.end()); + std::sort(g.begin(), g.end()); + std::sort(b.begin(), b.end()); + std::sort(qualities.begin(), qualities.end()); // 对质量排序 + + const int mid = colors.size() / 2; + + MedianValues result; + result.color = Color(r[mid], g[mid], b[mid]); // 颜色中值 + result.quality = qualities[mid]; // 质量中值 + + return result; + } + + // 计算亮度中值 + static float ComputeMedianLuminance(const std::vector>& views) { + std::vector luminances; + for (const auto& view : views) { + luminances.push_back(MeshTexture::GetLuminance(view.second)); + } + + std::sort(luminances.begin(), luminances.end()); + return luminances[luminances.size() / 2]; + } + + // 计算颜色绝对中位差(MAD) + static 
float ComputeColorMAD(const std::vector>& views, const Color& median) { + std::vector distances; + for (const auto& view : views) { + distances.push_back(cv::norm(view.second - median)); + } + + std::sort(distances.begin(), distances.end()); + return distances[distances.size() / 2]; + } + + // 计算亮度绝对中位差(MAD) + static float ComputeLuminanceMAD(const std::vector>& views, float medianLuminance) { + std::vector distances; + for (const auto& view : views) { + distances.push_back(std::abs(MeshTexture::GetLuminance(view.second) - medianLuminance)); + } + + std::sort(distances.begin(), distances.end()); + return distances[distances.size() / 2]; + } + //*/ /* // 采用ITU-R BT.601标准系数,增加数值稳定性处理 @@ -559,6 +636,7 @@ public: // store found texture patches TexturePatchArr texturePatches; + LabelArr labelsInvalid; // used to compute the seam leveling PairIdxArr seamEdges; // the (face-face) edges connecting different texture patches @@ -626,7 +704,8 @@ MeshTexture::MeshTexture(Scene& _scene, unsigned _nResolutionLevel, unsigned _nM vertices(_scene.mesh.vertices), faces(_scene.mesh.faces), images(_scene.images), - scene(_scene) + scene(_scene), + alternativeTexture(nullptr) { } MeshTexture::~MeshTexture() @@ -747,13 +826,15 @@ bool MeshTexture::ListCameraFaces(FaceDataViewArr& facesDatas, float fOutlierThr std::string strName = strPath.substr(lastSlash, lastDot - lastSlash); /* - // if (strName!="122_2") // 7613212046 11_2 122_2 - // if (strName!="63_2" && strName!="62_2") // 274658 123_2 12_2 63_2 62_2 - // if (strName!="72_2" && strName!="82_2" && strName!="83_2") // 274658 72_2 12_2 112_2 14_2 82_2 83_2 - // if (strName!="12_2" && strName!="112_2" && strName!="14_2" && - // strName!="82_2" && strName!="83_2" && strName!="123_2") - if (strName!="82_2") - // 274658 72_2 12_2 112_2 14_2 82_2 83_2 123_2 + // if (strName!="74_8" && strName!="13_8" && strName!="61_8" && + // strName!="92_8" && strName!="101_8" && strName!="102_8" && + // strName!="103_8" && strName!="112_8" && 
strName!="113_8" && + // strName!="122_8" && strName!="123_8" && strName!="132_8") + // if (strName!="74_8" && strName!="13_8" && strName!="61_8" && + // strName!="92_8" && strName!="101_8" && strName!="102_8" && + // strName!="103_8" && strName!="112_8" && strName!="113_8" && + // strName!="122_8" && strName!="123_8" && strName!="132_8") + if (strName!="76_8") { continue; } @@ -1004,6 +1085,7 @@ bool MeshTexture::ListCameraFaces(FaceDataViewArr& facesDatas, float fOutlierThr #ifdef TEXOPT_USE_OPENMP #pragma omp critical #endif + { // faceQuality is influenced by : // + area: the higher the area the more gradient scores will be added to the face quality @@ -1020,6 +1102,12 @@ bool MeshTexture::ListCameraFaces(FaceDataViewArr& facesDatas, float fOutlierThr FaceDataArr& faceDatas = facesDatas[idxFace]; + const Pixel8U& pixel = imageData.image(j, i); + // 假设是8位图像,RGB三个通道任一超过250即视为过曝 + if (pixel.r > 250 || pixel.g > 250 || pixel.b > 250) { + // continue; + } + // if (!(scene.mesh.invalidFacesRelative.data.contains(idxFace) && scene.is_face_visible_relative(idxFace))) // if (false) { @@ -1542,7 +1630,7 @@ std::unordered_set MeshTexture::PerformLocalDepthConsistencyCheck(DepthM consistencyMask = dilatedMask; - printf("n1=%d, n2=%d, n3=%d, n4=%d\n", n1, n2, n3, n4); + // printf("n1=%d, n2=%d, n3=%d, n4=%d\n", n1, n2, n3, n4); for (int r = 0; r < depthMap.rows; ++r) { for (int c = 0; c < depthMap.cols; ++c) { @@ -1684,7 +1772,7 @@ void MeshTexture::PerformLocalDepthConsistencyCheck(DepthMap& depthMap, FaceMap& // - a ratio of 1 means only angle is considered // - a ratio of 0.5 means angle and quality are equally important // - a ratio of 0 means only camera quality is considered when sorting -IIndexArr MeshTexture::SelectBestView(const FaceDataArr& faceDatas, FIndex fid, unsigned minCommonCameras, float ratioAngleToQuality) const +IIndexArr MeshTexture::SelectBestViews(const FaceDataArr& faceDatas, FIndex fid, unsigned minCommonCameras, float ratioAngleToQuality) const { 
ASSERT(!faceDatas.empty()); #if 1 @@ -1708,6 +1796,8 @@ IIndexArr MeshTexture::SelectBestView(const FaceDataArr& faceDatas, FIndex fid, } return (bestView != NO_ID) ? IIndexArr{bestView} : IIndexArr(); } + + //*/ // compute scores based on the view quality and its angle to the face normal @@ -1779,6 +1869,20 @@ IIndexArr MeshTexture::SelectBestView(const FaceDataArr& faceDatas, FIndex fid, return cameras; } +IIndexArr MeshTexture::SelectBestView(const FaceDataArr& faceDatas, FIndex fid, unsigned minCommonCameras, float ratioAngleToQuality) const +{ + float maxQuality = -1; + IIndex bestView = NO_ID; + for (const FaceData& fd : faceDatas) { + if (fd.quality > maxQuality) { + maxQuality = fd.quality; + bestView = fd.idxView; + } + } + + return (bestView != NO_ID) ? IIndexArr{bestView} : IIndexArr(); +} + static bool IsFaceVisible(const MeshTexture::FaceDataArr& faceDatas, const IIndexArr& cameraList) { size_t camFoundCounter(0); for (const MeshTexture::FaceData& faceData : faceDatas) { @@ -1822,7 +1926,7 @@ void MeshTexture::CreateVirtualFaces(const FaceDataViewArr& facesDatas, FaceData ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); remainingFaces.RemoveAtMove(posToErase); } else { - const IIndexArr selectedCams = SelectBestView(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); + const IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); queuedFaces.clear(); do { @@ -1977,7 +2081,7 @@ void MeshTexture::CreateVirtualFaces2(const FaceDataViewArr& facesDatas, FaceDat remainingFaces.RemoveAtMove(posToErase); } } else { - const IIndexArr selectedCams = SelectBestView(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); + const IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); // 验证选择的相机有效性 for (IIndex camIdx : 
selectedCams) { @@ -2190,7 +2294,7 @@ void MeshTexture::CreateVirtualFaces3(const FaceDataViewArr& facesDatas, FaceDat ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); remainingFaces.RemoveAtMove(posToErase); } else { - const IIndexArr selectedCams = SelectBestView(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); + const IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); queuedFaces.clear(); do { @@ -2344,7 +2448,7 @@ void MeshTexture::CreateVirtualFaces4(const FaceDataViewArr& facesDatas, } // 选择公共相机 - IIndexArr selectedCams = SelectBestView(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); + IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); // 严格筛选有效视图 if (selectedCams.size() < minCommonCameras) { @@ -2597,7 +2701,7 @@ void MeshTexture::CreateVirtualFaces5(FaceDataViewArr& facesDatas, FaceDataViewA ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); remainingFaces.RemoveAtMove(posToErase); } else { - const IIndexArr selectedCams = SelectBestView(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); + const IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); queuedFaces.clear(); do { @@ -2738,11 +2842,13 @@ void MeshTexture::CreateVirtualFaces5(FaceDataViewArr& facesDatas, FaceDataViewA } while (!remainingFaces.empty()); } -void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras, float thMaxNormalDeviation) const +bool MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& 
virtualFaces, std::vector& isVirtualFace, unsigned minCommonCameras, float thMaxNormalDeviation) const { if (meshCurvatures.empty()) { ComputeFaceCurvatures(); } + + float thMaxColorDeviation = 130.0f; const float ratioAngleToQuality(0.67f); const float cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation))); @@ -2751,6 +2857,23 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA std::vector selectedFaces(faces.size(), false); cQueue currentVirtualFaceQueue; std::unordered_set queuedFaces; + + // Precompute average color for each face + Colors faceColors; // 创建一个空列表 + faceColors.reserve(faces.size()); // 预分配空间(如果cList有reserve方法且您关心性能) + for (size_t i = 0; i < faces.size(); ++i) { + faceColors.push_back(Color::ZERO); // 逐个添加元素 + } + for (FIndex idxFace = 0; idxFace < faces.size(); ++idxFace) { + const FaceDataArr& faceDatas = facesDatas[idxFace]; + if (faceDatas.empty()) continue; + Color sumColor = Color::ZERO; + for (const FaceData& fd : faceDatas) { + sumColor += fd.color; + } + faceColors[idxFace] = sumColor / faceDatas.size(); + } + do { const FIndex startPos = RAND() % remainingFaces.size(); const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; @@ -2766,13 +2889,17 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA // 检查中心面片是否包含无效视图 bool bHasInvalidView = false; + int nInvalidViewCount = 0; + int nTotalViewCount = 0; for (const FaceData& faceData : centerFaceDatas) { if (faceData.bInvalidFacesRelative) { bHasInvalidView = true; - break; + ++nInvalidViewCount; + // break; } + ++nTotalViewCount; } - + std::vector> sortedViews; std::vector> sortedLuminViews; std::vector> validViews; @@ -2798,8 +2925,8 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA std::sort(validViews.begin(), validViews.end(), [](const auto& a, const auto& b) { return a.first > b.first; }); - // int nSize = sortedViews.size(); - int nSize = (sortedViews.size()>1) ? 
1 : sortedViews.size(); + int nSize = sortedViews.size(); + // int nSize = (sortedViews.size()>1) ? 1 : sortedViews.size(); // 计算初始平均值 float totalQuality = 0.0f; Color totalColor(0,0,0); @@ -2828,7 +2955,92 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); remainingFaces.RemoveAtMove(posToErase); } else { - const IIndexArr selectedCams = SelectBestView(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); + IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); + + //* + // 获取中心面片的法线 (注意变量名是 normalCenter, 不是 centerNormal) + const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; + // 过滤selectedCams:只保留夹角小于30度的视图 + IIndexArr filteredCams; // 用于存储过滤后的视图索引 + for (IIndex idxView : selectedCams) { + const Image& imageData = images[idxView]; + // 计算相机在世界坐标系中的朝向向量(相机镜面法线) + const RMatrix& R = imageData.camera.R; // 请根据 R 的实际类型调整,可能是 Matrix3x3f 或其他 + // 相机局部坐标系中的向前向量 (0,0,-1) + Point3f localForward(0.0f, 0.0f, -1.0f); + // 手动计算矩阵乘法:cameraForward = R * localForward + Point3f cameraForward; + cameraForward.x = R(0,0) * localForward.x + R(0,1) * localForward.y + R(0,2) * localForward.z; + cameraForward.y = R(1,0) * localForward.x + R(1,1) * localForward.y + R(1,2) * localForward.z; + cameraForward.z = R(2,0) * localForward.x + R(2,1) * localForward.y + R(2,2) * localForward.z; + + // 手动归一化 cameraForward(因为 Point3f 可能没有 normalize() 成员函数) + float norm = std::sqrt(cameraForward.x * cameraForward.x + + cameraForward.y * cameraForward.y + + cameraForward.z * cameraForward.z); + if (norm > 0.0f) { + cameraForward.x /= norm; + cameraForward.y /= norm; + cameraForward.z /= norm; + } else { + // 处理零向量的情况,赋予默认值 + cameraForward = Point3f(0, 0, -1); + } + + // 计算夹角余弦值 - 使用已声明的 normalCenter + // 假设 Normal 类型可以隐式转换为 Point3f,或进行显式转换 + Point3f normalPoint(normalCenter.x, normalCenter.y, 
normalCenter.z); // 显式转换示例 + float cosAngle = cameraForward.dot(normalPoint); // 使用正确的变量名 normalPoint(由 normalCenter 转换而来) + float angleDeg = std::acos(cosAngle) * 180.0f / M_PI; // 将弧度转换为角度 + + std::string strPath = imageData.name; + size_t lastSlash = strPath.find_last_of("/\\"); + if (lastSlash == std::string::npos) lastSlash = 0; // 若无分隔符,从头开始 + else lastSlash++; // 跳过分隔符 + + // 查找扩展名分隔符 '.' 的位置 + size_t lastDot = strPath.find_last_of('.'); + if (lastDot == std::string::npos) lastDot = strPath.size(); // 若无扩展名,截到末尾 + + // 截取文件名(不含路径和扩展名) + std::string strName = strPath.substr(lastSlash, lastDot - lastSlash); + + // printf("CreateVirtualFace %s, %d\n", strName.c_str(), virtualFaceCenterFaceID); + + if (!scene.is_face_delete_edge(strName, virtualFaceCenterFaceID)) + { + if (scene.is_face_edge(strName, virtualFaceCenterFaceID)) + { + // printf("CreateVirtualFace %s, %d, %f\n", strName.c_str(), virtualFaceCenterFaceID, angleLimit); + + if (angleDeg <= 45.0f) + { + filteredCams.push_back(idxView); + } + } + else + { + filteredCams.push_back(idxView); + } + } + } + + // 确保 selectedCams 是非 const 的,才能对其进行赋值 + // 例如,其声明应为:IIndexArr selectedCams = ...; (不能是 const IIndexArr) + if (filteredCams.empty()) { + // 处理所有视图都被过滤的情况... 
+ // DEBUG_EXTRA("Warning: All views filtered for virtual face due to angle condition."); + + // selectedCams = SelectBestView(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); + selectedCams = filteredCams; + isVirtualFace[virtualFaceCenterFaceID] = false; + + } else { + selectedCams = filteredCams; + isVirtualFace[virtualFaceCenterFaceID] = true; + } + //*/ + currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); queuedFaces.clear(); do { @@ -2847,6 +3059,19 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams)) continue; + // Check color similarity + const Color& centerColor = faceColors[virtualFaceCenterFaceID]; + const Color& currentColor = faceColors[currentFaceId]; + // if (cv::norm(centerColor) > 1e-5 && cv::norm(currentColor) > 1e-5) + { + float colorDistance = cv::norm(centerColor - currentColor); + // printf("1colorDistance=%f\n", colorDistance); + if (colorDistance > thMaxColorDeviation) { + // printf("2colorDistance=%f\n", colorDistance); + // continue; // Skip if color difference is too large + } + } + /* // #ifdef TEXOPT_USE_OPENMP // #pragma omp critical @@ -2882,6 +3107,19 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA } } } while (!currentVirtualFaceQueue.IsEmpty()); + + /* + if (selectedCams.empty()) { + const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color; + const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality; + + FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); + virtualFaceData.color = medianColor; + virtualFaceData.quality = medianQuality; + + } + */ + // compute virtual face quality and create virtual face for (IIndex idxView: selectedCams) { FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); @@ -2952,9 +3190,9 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA } else { - 
virtualFaceData.quality += faceData.quality; + // virtualFaceData.quality += faceData.quality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA - virtualFaceData.color += faceData.color; + // virtualFaceData.color += faceData.color; #endif ++processedFaces; // break; @@ -2967,8 +3205,140 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA float maxLuminance = 120.0f; float minLuminance = 90.0f; int validViewsSize = validViews.size(); + // bHasInvalidView = true; if (bHasInvalidView) { + // 使用鲁棒的统计方法计算颜色和亮度的中心值 + const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color; + const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality; + const float medianLuminance = ComputeMedianLuminance(sortedViews); + + // 计算颜色和亮度的绝对中位差(MAD)作为偏差阈值 + const float colorMAD = ComputeColorMAD(sortedViews, medianColor); + const float luminanceMAD = ComputeLuminanceMAD(sortedViews, medianLuminance); + + // 基于MAD设置动态阈值(3倍MAD是统计学上常用的异常值阈值) + const float maxColorDeviation = 0.01f * colorMAD; + const float maxLuminanceDeviation = 0.01f * luminanceMAD; + + std::vector validIndices; + for (int n = 0; n < sortedViews.size(); ++n) { + const Color& viewColor = sortedViews[n].second; + const float viewLuminance = MeshTexture::GetLuminance(viewColor); + + const float colorDistance = cv::norm(viewColor - medianColor); + const float luminanceDistance = std::abs(viewLuminance - medianLuminance); + + if (colorDistance <= maxColorDeviation && + luminanceDistance <= maxLuminanceDeviation) + { + validIndices.push_back(n); + } + else + { + const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); + const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; + const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); + + bool bColorSimilarity = true; + // Check color similarity + const Color& centerColor = faceColors[virtualFaceCenterFaceID]; + const Color& currentColor = faceColors[currentFaceId]; + + float 
colorDistance = cv::norm(centerColor - currentColor); + // printf("1colorDistance=%f\n", colorDistance); + if (colorDistance > thMaxColorDeviation) { + // printf("2colorDistance=%f\n", colorDistance); + bColorSimilarity = false; + } + + // if ((cosFaceToCenter validIndices; + // std::vector validIndices; float maxColorDeviation = 0.01f; // 颜色偏差阈值 float maxLuminanceDeviation = 0.01f; @@ -3003,7 +3373,7 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA // (viewLuminance>=minLuminance)){ if ((colorDistance <= maxColorDeviation) && (luminanceDistance <= maxLuminanceDeviation)) { - validIndices.push_back(n); + // validIndices.push_back(n); } } @@ -3015,7 +3385,7 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA float luminanceDistance = cv::norm(avgLuminance - viewLuminance); if (luminanceDistance <= maxLuminanceDeviation){ - validIndices.push_back(n); + // validIndices.push_back(n); } } } @@ -3025,7 +3395,7 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA float colorDistance = cv::norm(avgColor - viewColor); if (colorDistance<=maxColorDeviation){ - validIndices.push_back(n); + // validIndices.push_back(n); } } } @@ -3061,20 +3431,27 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA // 如果所有视图都被排除,保留原始平均值 if (validIndices.empty()) { - virtualFaceData.quality = avgQuality; + // virtualFaceData.quality = avgQuality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA - virtualFaceData.color = avgColor; + // virtualFaceData.color = avgColor; #endif // virtualFaceData.quality = avgQuality; // virtualFaceData.color = sortedLuminViews[0].second; + virtualFaceData.quality = medianQuality; + virtualFaceData.color = medianColor; } else { // 使用过滤后的视图重新计算平均值 float totalQuality2 = 0.0f; Color totalColor2 = Color(0,0,0); for (int idx : validIndices) { + + const Color& viewColor = sortedViews[idx].second; + float colorDistance = cv::norm(avgColor - 
viewColor); + float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation); + totalQuality2 += sortedViews[idx].first; - totalColor2 += sortedViews[idx].second; + totalColor2 += sortedViews[idx].second * weight; } virtualFaceData.quality = totalQuality2 / validIndices.size(); virtualFaceData.color = totalColor2 / validIndices.size(); @@ -3112,19 +3489,18 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA const Color avgColor2 = totalColor2 / nSize; // 过滤偏差过大的视图 - std::vector validIndices; - float maxColorDeviation = 0.5f; // 颜色偏差阈值 + // std::vector validIndices; + float maxColorDeviation = 0.01f; // 颜色偏差阈值 for (int n = 0; n < nSize; ++n) { const Color& viewColor = validViews[n].second; float colorDistance = cv::norm(avgColor2 - viewColor); // printf("colorDistance=%f\n", colorDistance); float viewLuminance = MeshTexture::GetLuminance(viewColor); - // if ((colorDistance<=maxColorDeviation)&& - // (viewLuminance<=maxLuminance)&& - // (viewLuminance>=minLuminance)){ - if (colorDistance <= maxColorDeviation) { - validIndices.push_back(n); + if ((colorDistance<=maxColorDeviation)&& + (viewLuminance<=120.0f)){ + // if (colorDistance <= maxColorDeviation) { + // validIndices.push_back(n); } } @@ -3145,10 +3521,12 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA // 如果所有视图都被排除,保留原始平均值 if (validIndices.empty()) { - virtualFaceData.quality = avgQuality; + // virtualFaceData.quality = avgQuality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA - virtualFaceData.color = avgColor; + // virtualFaceData.color = avgColor; #endif + virtualFaceData.quality = medianQuality; + virtualFaceData.color = medianColor; // virtualFaceData.color = sortedLuminViews[0].second; /* @@ -3169,8 +3547,12 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA float totalQuality2 = 0.0f; Color totalColor2 = Color(0,0,0); for (int idx : validIndices) { + const Color& viewColor = sortedViews[idx].second; + 
float colorDistance = cv::norm(avgColor - viewColor); + float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation); + totalQuality2 += validViews[idx].first; - totalColor2 += validViews[idx].second; + totalColor2 += validViews[idx].second * weight; } virtualFaceData.quality = totalQuality2 / validIndices.size(); virtualFaceData.color = totalColor2 / validIndices.size(); @@ -3186,102 +3568,78 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA virtualFaceData.color /= processedFaces; #endif */ + // 如果所有视图都被排除,保留原始平均值 + if (validIndices.empty()) { + // virtualFaceData.quality = avgQuality; + #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA + // virtualFaceData.color = avgColor; + #endif + virtualFaceData.quality = medianQuality; + virtualFaceData.color = medianColor; + } + else { + // 使用过滤后的视图重新计算平均值 + float totalQuality2 = 0.0f; + Color totalColor2 = Color(0,0,0); + for (int idx : validIndices) { + const Color& viewColor = sortedViews[idx].second; + float colorDistance = cv::norm(avgColor - viewColor); + float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation); + + totalQuality2 += validViews[idx].first; + totalColor2 += validViews[idx].second * weight; + } + virtualFaceData.quality = totalQuality2 / validIndices.size(); + virtualFaceData.color = totalColor2 / validIndices.size(); + } } } else { - float maxLuminance2 = 200.0f; - float minLuminance2 = 20.0f; - /* - std::vector validIndices; - float maxColorDeviation = 1.0f; // 颜色偏差阈值 - float maxLuminanceDeviation = 0.1f; + // 使用鲁棒的统计方法计算颜色和亮度的中心值 + const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color; + const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality; + const float medianLuminance = ComputeMedianLuminance(sortedViews); + + // 计算颜色和亮度的绝对中位差(MAD)作为偏差阈值 + const float colorMAD = ComputeColorMAD(sortedViews, medianColor); + const float luminanceMAD = ComputeLuminanceMAD(sortedViews, medianLuminance); - for (int n = 0; n < nSize; 
++n) { + // 基于MAD设置动态阈值(3倍MAD是统计学上常用的异常值阈值) + const float maxColorDeviation = 0.01f * colorMAD; + const float maxLuminanceDeviation = 0.01f * luminanceMAD; + + std::vector validIndices; + for (int n = 0; n < sortedViews.size(); ++n) { const Color& viewColor = sortedViews[n].second; - float colorDistance = cv::norm(avgColor - viewColor); - // printf("colorDistance=%f\n", colorDistance); - - float viewLuminance = MeshTexture::GetLuminance(viewColor); - float luminanceDistance = cv::norm(avgLuminance - viewLuminance); - // printf("viewLuminance=%f\n", viewLuminance); - - if ((colorDistance<=maxColorDeviation)&& - (viewLuminance<=maxLuminance2)&& - (viewLuminance>=minLuminance2)){ - // if ((colorDistance <= maxColorDeviation) && - // (luminanceDistance <= maxLuminanceDeviation)) { + const float viewLuminance = MeshTexture::GetLuminance(viewColor); + + const float colorDistance = cv::norm(viewColor - medianColor); + const float luminanceDistance = std::abs(viewLuminance - medianLuminance); + + // if (colorDistance <= maxColorDeviation && + // luminanceDistance <= maxLuminanceDeviation) + { validIndices.push_back(n); } } if (validIndices.empty()) { - for (int n = 0; n < nSize; ++n) { - const Color& viewColor = sortedViews[n].second; - float colorDistance = cv::norm(avgColor - viewColor); - - if (colorDistance<=maxColorDeviation){ - validIndices.push_back(n); - } - } - } - - if (validIndices.empty()) { - for (int n = 0; n < nSize; ++n) { - const Color& viewColor = sortedViews[n].second; - float viewLuminance = MeshTexture::GetLuminance(viewColor); - float luminanceDistance = cv::norm(avgLuminance - viewLuminance); - - if (luminanceDistance <= maxLuminanceDeviation){ - validIndices.push_back(n); - } - } - } - if (validIndices.empty()) { - for (int n = 0; n < nSize; ++n) { - const Color& viewColor = sortedViews[n].second; - float viewLuminance = MeshTexture::GetLuminance(viewColor); - - if ((viewLuminance<=maxLuminance)&& - (viewLuminance>=minLuminance)){ - 
validIndices.push_back(n); - } - } - } - // 如果所有视图都被排除,保留原始平均值 - if (validIndices.empty()) { - // virtualFaceData.quality = avgQuality; - // virtualFaceData.color = avgColor; - // virtualFaceData.quality = avgQuality; - - // ASSERT(processedFaces > 0); - // virtualFaceData.quality /= processedFaces; - // #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA - // virtualFaceData.color /= processedFaces; - // #endif - + virtualFaceData.quality = medianQuality; + virtualFaceData.color = medianColor; } else { // 使用过滤后的视图重新计算平均值 - totalQuality = 0.0f; - totalColor = Color(0,0,0); + float totalQuality2 = 0.0f; + Color totalColor2 = Color(0,0,0); for (int idx : validIndices) { - totalQuality += sortedViews[idx].first; - totalColor += sortedViews[idx].second; + totalQuality2 += validViews[idx].first; + totalColor2 += validViews[idx].second; } - virtualFaceData.quality = totalQuality / validIndices.size(); - virtualFaceData.color = totalColor / validIndices.size(); + virtualFaceData.quality = totalQuality2 / validIndices.size(); + virtualFaceData.color = totalColor2 / validIndices.size(); } - //*/ - - //* - ASSERT(processedFaces > 0); - virtualFaceData.quality /= processedFaces; - #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA - virtualFaceData.color /= processedFaces; - #endif - //*/ } // virtualFaceData.bInvalidFacesRelative = (invalidCount > 1); @@ -3292,22 +3650,227 @@ void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA virtualFacesDatas.emplace_back(std::move(virtualFaceDatas)); virtualFaces.emplace_back(std::move(virtualFace)); } while (!remainingFaces.empty()); + + return true; } -#if TEXOPT_FACEOUTLIER == TEXOPT_FACEOUTLIER_MEDIAN +/* -// decrease the quality of / remove all views in which the face's projection -// has a much different color than in the majority of views -bool MeshTexture::FaceOutlierDetection(FaceDataArr& faceDatas, float thOutlier) const +void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewArr& 
virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras, float thMaxNormalDeviation) const { - // consider as outlier if the absolute difference to the median is outside this threshold - if (thOutlier <= 0) - thOutlier = 0.15f*255.f; - - // init colors array - if (faceDatas.size() <= 3) - return false; - FloatArr channels[3]; + float thMaxColorDeviation = 0.000001f; + if (meshCurvatures.empty()) { + ComputeFaceCurvatures(); + } + + const float ratioAngleToQuality(0.67f); + const float cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation))); + Mesh::FaceIdxArr remainingFaces(faces.size()); + std::iota(remainingFaces.begin(), remainingFaces.end(), 0); + std::vector selectedFaces(faces.size(), false); + cQueue currentVirtualFaceQueue; + std::unordered_set queuedFaces; + + // Precompute average color for each face + Colors faceColors; // 创建一个空列表 + faceColors.reserve(faces.size()); // 预分配空间(如果cList有reserve方法且您关心性能) + for (size_t i = 0; i < faces.size(); ++i) { + faceColors.push_back(Color::ZERO); // 逐个添加元素 + } + for (FIndex idxFace = 0; idxFace < faces.size(); ++idxFace) { + const FaceDataArr& faceDatas = facesDatas[idxFace]; + if (faceDatas.empty()) continue; + Color sumColor = Color::ZERO; + for (const FaceData& fd : faceDatas) { + sumColor += fd.color; + } + faceColors[idxFace] = sumColor / faceDatas.size(); + } + + do { + const FIndex startPos = RAND() % remainingFaces.size(); + const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; + + // 动态法线阈值 + const float centerCurvature = meshCurvatures[virtualFaceCenterFaceID]; + const float dynamicThreshold = (centerCurvature < 0.2f) ? 
15.0f : 8.0f; // 曲率<0.2为平坦区域 + const float dynamicCosTh = COS(FD2R(dynamicThreshold)); + + ASSERT(currentVirtualFaceQueue.IsEmpty()); + const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; + const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID]; + + // 检查中心面片是否包含无效视图 + bool bHasInvalidView = false; + int nInvalidViewCount = 0; + int nTotalViewCount = 0; + for (const FaceData& faceData : centerFaceDatas) { + if (faceData.bInvalidFacesRelative) { + bHasInvalidView = true; + ++nInvalidViewCount; + } + ++nTotalViewCount; + } + + std::vector> sortedViews; + std::vector> sortedLuminViews; + std::vector> validViews; + sortedViews.reserve(centerFaceDatas.size()); + for (const FaceData& fd : centerFaceDatas) { + if (fd.bInvalidFacesRelative) { + sortedViews.emplace_back(fd.quality, fd.color); + sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color); + } else { + sortedViews.emplace_back(fd.quality, fd.color); + sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color); + validViews.emplace_back(fd.quality, fd.color); + } + } + std::sort(sortedViews.begin(), sortedViews.end(), + [](const auto& a, const auto& b) { return a.first > b.first; }); + std::sort(validViews.begin(), validViews.end(), + [](const auto& a, const auto& b) { return a.first > b.first; }); + + int nSize = sortedViews.size(); + // 计算初始平均值 + float totalQuality = 0.0f; + Color totalColor(0,0,0); + for (int n = 0; n < nSize; ++n) { + totalQuality += sortedViews[n].first; + totalColor += sortedViews[n].second; + } + const float avgQuality = totalQuality / nSize; + const Color avgColor = totalColor / nSize; + + float totalLuminance = MeshTexture::GetLuminance(totalColor); + float avgLuminance = totalLuminance / nSize; + std::sort(sortedLuminViews.begin(), sortedLuminViews.end(), + [avgLuminance](const auto& a, const auto& b) { + float luminDistA = cv::norm(avgLuminance - a.first); + float luminDistB = cv::norm(avgLuminance - 
b.first); + return luminDistA < luminDistB; }); + + // select the common cameras + Mesh::FaceIdxArr virtualFace; + FaceDataArr virtualFaceDatas; + if (centerFaceDatas.empty()) { + virtualFace.emplace_back(virtualFaceCenterFaceID); + selectedFaces[virtualFaceCenterFaceID] = true; + const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID); + ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); + remainingFaces.RemoveAtMove(posToErase); + } else { + const IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); + currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); + queuedFaces.clear(); + do { + const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); + currentVirtualFaceQueue.PopHead(); + // check for condition to add in current virtual face + // normal angle smaller than thMaxNormalDeviation degrees + const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; + const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); + if (cosFaceToCenter < dynamicCosTh) // 使用动态阈值 + continue; + // check if current face is seen by all cameras in selectedCams + ASSERT(!selectedCams.empty()); + if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams)) + continue; + + // Check color similarity + const Color& centerColor = faceColors[virtualFaceCenterFaceID]; + const Color& currentColor = faceColors[currentFaceId]; + if (cv::norm(centerColor) > 1e-5 && cv::norm(currentColor) > 1e-5) { + float colorDistance = cv::norm(centerColor - currentColor); + if (colorDistance > thMaxColorDeviation) + { + continue; // Skip if color difference is too large + } + } + + // remove it from remaining faces and add it to the virtual face + { + const auto posToErase = remainingFaces.FindFirst(currentFaceId); + ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); + remainingFaces.RemoveAtMove(posToErase); + selectedFaces[currentFaceId] = true; + virtualFace.push_back(currentFaceId); + 
} + // add all new neighbors to the queue + const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId]; + for (int i = 0; i < 3; ++i) { + const FIndex fIdx = ffaces[i]; + if (fIdx == NO_ID) + continue; + if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) { + currentVirtualFaceQueue.AddTail(fIdx); + queuedFaces.emplace(fIdx); + } + } + } while (!currentVirtualFaceQueue.IsEmpty()); + // compute virtual face quality and create virtual face + for (IIndex idxView: selectedCams) { + FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); + virtualFaceData.quality = 0; + virtualFaceData.idxView = idxView; + #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA + virtualFaceData.color = Point3f::ZERO; + #endif + int invalidQuality = 0; + Color invalidColor = Point3f::ZERO; + unsigned processedFaces(0); + bool bInvalidFacesRelative = false; + int invalidCount = 0; + for (FIndex fid : virtualFace) { + const FaceDataArr& faceDatas = facesDatas[fid]; + for (FaceData& faceData: faceDatas) { + // 填充: 只处理当前视图的数据,累加质量和颜色 + if (faceData.idxView == idxView) { + virtualFaceData.quality += faceData.quality; + #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA + virtualFaceData.color += faceData.color; + #endif + processedFaces++; + if (faceData.bInvalidFacesRelative) { + invalidCount++; + } + break; // 每个面片每个视图只应有一个数据,找到后退出内层循环 + } + } + } + // 填充: 后处理,计算平均值和设置无效标志 + if (processedFaces > 0) { + virtualFaceData.quality /= processedFaces; + #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA + virtualFaceData.color /= processedFaces; + #endif + virtualFaceData.bInvalidFacesRelative = (invalidCount > processedFaces / 2); // 如果超过一半面片无效,则标记虚拟面无效 + } else { + // 如果没有找到任何数据,移除刚添加的virtualFaceData + virtualFaceDatas.pop_back(); + } + } + ASSERT(!virtualFaceDatas.empty()); + } + virtualFacesDatas.emplace_back(std::move(virtualFaceDatas)); + virtualFaces.emplace_back(std::move(virtualFace)); + } while (!remainingFaces.empty()); +} +*/ +#if TEXOPT_FACEOUTLIER == 
TEXOPT_FACEOUTLIER_MEDIAN + +// decrease the quality of / remove all views in which the face's projection +// has a much different color than in the majority of views +bool MeshTexture::FaceOutlierDetection(FaceDataArr& faceDatas, float thOutlier) const +{ + // consider as outlier if the absolute difference to the median is outside this threshold + if (thOutlier <= 0) + thOutlier = 0.15f*255.f; + + // init colors array + if (faceDatas.size() <= 3) + return false; + FloatArr channels[3]; for (int c=0; c<3; ++c) channels[c].resize(faceDatas.size()); FOREACH(i, faceDatas) { @@ -4670,12 +5233,12 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT // create texture patches { - printf("FaceViewSelection3 1 scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size()); + // printf("FaceViewSelection3 1 scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size()); // compute face normals and smoothen them scene.mesh.SmoothNormalFaces(); - printf("FaceViewSelection3 2 scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size()); + // printf("FaceViewSelection3 2 scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size()); - const bool bUseVirtualFaces(minCommonCameras > 0); + bool bUseVirtualFaces(minCommonCameras > 0); // list all views for each face FaceDataViewArr facesDatas; @@ -4689,28 +5252,67 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT Graph graph; LabelArr labels; + // 为每个面片单独决定是否使用虚拟面算法 + Mesh::FaceIdxArr virtualFacesI; // 使用虚拟面算法的面片 + Mesh::FaceIdxArr perFaceFaces; // 使用逐面算法的面片 + + /* + bool bVirtualFacesSuccess = false; + if (bUseVirtualFaces) + { + // 1) create FaceToVirtualFaceMap + FaceDataViewArr virtualFacesDatas; + VirtualFaceIdxsArr virtualFaces; // stores each virtual face as an array of mesh face ID + + bVirtualFacesSuccess = CreateVirtualFaces6(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); + if (!virtualFaces.empty()) + { + bVirtualFacesSuccess = true; + } + if 
(!bVirtualFacesSuccess) { + bUseVirtualFaces = false; + DEBUG_EXTRA("Warning: Failed to create virtual faces. Falling back to per-face view selection."); + } + } + */ + // construct and use virtual faces for patch creation instead of actual mesh faces; // the virtual faces are composed of coplanar triangles sharing same views if (bUseVirtualFaces) { Mesh::FaceIdxArr mapFaceToVirtualFace(faces.size()); // for each mesh face ID, store the virtual face ID witch contains it + // 标记使用虚拟面算法的面片 + std::vector isVirtualFace(faces.size(), true); + // 1) create FaceToVirtualFaceMap FaceDataViewArr virtualFacesDatas; VirtualFaceIdxsArr virtualFaces; // stores each virtual face as an array of mesh face ID // CreateVirtualFaces(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); // CreateVirtualFaces3(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); // CreateVirtualFaces4(facesDatas, virtualFacesDatas, virtualFaces, mapFaceToVirtualFace, minCommonCameras); - CreateVirtualFaces6(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); + CreateVirtualFaces6(facesDatas, virtualFacesDatas, virtualFaces, isVirtualFace, minCommonCameras); + size_t controlCounter(0); FOREACH(idxVF, virtualFaces) { const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; for (FIndex idxFace : vf) { mapFaceToVirtualFace[idxFace] = idxVF; + // isVirtualFace[idxFace] = true; + // virtualFacesI.push_back(idxFace); ++controlCounter; } } + // 标记使用逐面算法的面片 + FOREACH(f, faces) { + if (isVirtualFace[f]) { + perFaceFaces.push_back(f); + } + } + + printf("perFaceFaces.size = %d\n", perFaceFaces.size()); + ASSERT(controlCounter == faces.size()); // 2) create function to find virtual faces neighbors VirtualFaceIdxsArr virtualFaceNeighbors; @@ -4735,36 +5337,6 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT } } //*/ - /* - FOREACH(idxVirtualFace, virtualFaces) { - const Mesh::FaceIdxArr& vf = virtualFaces[idxVirtualFace]; - Mesh::FaceIdxArr& vfNeighbors = 
virtualFaceNeighbors[idxVirtualFace]; - - for (FIndex idxFace : vf) { - const Mesh::FaceFaces& adjFaces = faceFaces[idxFace]; - for (int i = 0; i < 3; ++i) { - const FIndex fAdj(adjFaces[i]); - if (fAdj == NO_ID) continue; - - // 查找邻居面所属的虚拟面 - FIndex adjVF = NO_ID; - for (FIndex vfIdx = 0; vfIdx < virtualFaces.size(); vfIdx++) { - if (virtualFaces[vfIdx].Find(fAdj) != Mesh::FaceIdxArr::NO_INDEX) { - adjVF = vfIdx; - break; - } - } - - if (adjVF == NO_ID || adjVF == idxVirtualFace) - continue; - - if (vfNeighbors.Find(adjVF) == Mesh::FaceIdxArr::NO_INDEX) { - vfNeighbors.emplace_back(adjVF); - } - } - } - } - //*/ } // 3) use virtual faces to build the graph // 4) assign images to virtual faces @@ -4787,30 +5359,6 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT } } //*/ - /* - FOREACH(idxVirtualFace, virtualFaces) { - const Mesh::FaceIdxArr& afaces = virtualFaceNeighbors[idxVirtualFace]; - for (FIndex idxVirtualFaceAdj: afaces) { - if (idxVirtualFace >= idxVirtualFaceAdj) - continue; - - // +++ 添加边界检查 +++ - if (idxVirtualFace >= virtualFacesDatas.size() || - idxVirtualFaceAdj >= virtualFacesDatas.size()) { - VERBOSE("Warning: virtual face index out of bounds: %u or %u >= %zu", - idxVirtualFace, idxVirtualFaceAdj, virtualFacesDatas.size()); - continue; - } - - const bool bInvisibleFace(virtualFacesDatas[idxVirtualFace].empty()); - const bool bInvisibleFaceAdj(virtualFacesDatas[idxVirtualFaceAdj].empty()); - if (bInvisibleFace || bInvisibleFaceAdj) - continue; - - boost::add_edge(idxVirtualFace, idxVirtualFaceAdj, graph); - } - } - //*/ // 这里通过深度图判断virtualFaces是否要为invalid // CheckInvalidFaces(virtualFacesDatas, fOutlierThreshold, nIgnoreMaskLabel, views, bUseVirtualFaces)) @@ -4819,7 +5367,14 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT ASSERT((Mesh::FIndex)boost::num_vertices(graph) == virtualFaces.size()); // assign the best view to each face - labels.resize(faces.size()); { + 
labels.resize(faces.size()); + labelsInvalid.resize(faces.size()); + + FOREACH(l, labelsInvalid) { + labelsInvalid[l] = NO_ID; + } + + { // normalize quality values float maxQuality(0); for (const FaceDataArr& faceDatas: virtualFacesDatas) { @@ -4878,8 +5433,8 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT float invalidQuality; // 当视角不足时,只保留最佳视角 - // if (numViews <= minSingleView) { - if (true) { + if (numViews <= minSingleView) { + // if (true) { std::vector> sortedViews; sortedViews.reserve(faceDatas.size()); for (const FaceData& fd : faceDatas) { @@ -4911,8 +5466,8 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT const float baseCostScale = 0.1f; // 基础成本缩放系数 const float costStep = 0.3f; // 相邻视角成本增量 - // if (bInvalidFacesRelative && sortedViews.size() == 0) - if (bInvalidFacesRelative) + if (bInvalidFacesRelative && sortedViews.size() == 0) + // if (bInvalidFacesRelative) { // const Label label = (Label)sortedViews[0].second + 1; const Label label = (Label)invalidView + 1; @@ -5024,6 +5579,28 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT labels[l] = virtualLabels[mapFaceToVirtualFace[l]]; } */ + + /* + if (!perFaceFaces.empty()) { + FOREACH(f, perFaceFaces) { + FaceDataArr& faceData = facesDatas[f]; + if (faceData.empty()) continue; + + // 选择最佳视图 + float bestQuality = -1; + IIndex bestView = NO_ID; + for (const FaceData& data : faceData) { + if (data.quality > bestQuality) { + bestQuality = data.quality; + bestView = data.idxView; + } + } + + labels[f] = bestView; + } + } + //*/ + //* // 修改后安全版本 FOREACH(l, labels) { if (l < mapFaceToVirtualFace.size()) { @@ -5041,6 +5618,55 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT l, mapFaceToVirtualFace.size()-1); } } + //*/ + // 修改后安全版本 + /* + FOREACH(l, labels) { + if (l < mapFaceToVirtualFace.size()) { + const size_t virtualIdx = mapFaceToVirtualFace[l]; + if (virtualIdx < 
virtualLabels.size()) { + labels[l] = virtualLabels[virtualIdx]; + } else { + // 虚拟面映射失败,回退到非虚拟面方法:选择最佳视图 + const FaceDataArr& faceDatas = facesDatas[l]; + if (!faceDatas.empty()) { + // 找到质量最高的视角 + float maxQuality = -1; + IIndex bestView = NO_ID; + for (const FaceData& fd : faceDatas) { + if (fd.quality > maxQuality && !fd.bInvalidFacesRelative) { + maxQuality = fd.quality; + bestView = fd.idxView; + } + } + labels[l] = bestView; + } else { + labels[l] = NO_ID; + } + DEBUG_EXTRA("Warning: Invalid virtual face index for face %u: %u (max: %u) - using best view %u", + l, virtualIdx, virtualLabels.size()-1, labels[l]); + } + } else { + // 面片索引越界,同样回退到非虚拟面方法 + const FaceDataArr& faceDatas = facesDatas[l]; + if (!faceDatas.empty()) { + float maxQuality = -1; + IIndex bestView = NO_ID; + for (const FaceData& fd : faceDatas) { + if (fd.quality > maxQuality && !fd.bInvalidFacesRelative) { + maxQuality = fd.quality; + bestView = fd.idxView; + } + } + labels[l] = bestView; + } else { + labels[l] = NO_ID; + } + DEBUG_EXTRA("Warning: Face index out of bounds: %u (max: %u) - using best view %u", + l, mapFaceToVirtualFace.size()-1, labels[l]); + } + } + */ #endif } @@ -5100,103 +5726,250 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT LOG_OUT() << "bUseVirtualFaces=" << bUseVirtualFaces << std::endl; // start patch creation starting directly from individual faces - if (!bUseVirtualFaces) { - // assign the best view to each face - labels.resize(faces.size()); { - // normalize quality values - float maxQuality(0); - for (const FaceDataArr& faceDatas: facesDatas) { - for (const FaceData& faceData: faceDatas) - if (maxQuality < faceData.quality) - maxQuality = faceData.quality; + + if (bUseVirtualFaces) + // if (false) + { + // normalize quality values + float maxQuality(0); + /* + for (const FaceDataArr& faceDatas: facesDatas) { + for (const FaceData& faceData: faceDatas) + if (maxQuality < faceData.quality) + maxQuality = faceData.quality; + } 
+ */ + FOREACH(idxFace, facesDatas) { + if (labels[idxFace] != NO_ID) + continue; + const FaceDataArr& faceDataArr = facesDatas[idxFace]; + for (const FaceData& faceData : faceDataArr) { + if (maxQuality < faceData.quality) + maxQuality = faceData.quality; } - Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); - for (const FaceDataArr& faceDatas: facesDatas) { - for (const FaceData& faceData: faceDatas) - hist.Add(faceData.quality); + } + Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); + /* + for (const FaceDataArr& faceDatas: facesDatas) { + for (const FaceData& faceData: faceDatas) + hist.Add(faceData.quality); + } + */ + FOREACH(idxFace, facesDatas) { + if (labels[idxFace] != NO_ID) + continue; + const FaceDataArr& faceDataArr = facesDatas[idxFace]; + for (const FaceData& faceData : faceDataArr) { + hist.Add(faceData.quality); } - const float normQuality(hist.GetApproximatePermille(0.95f)); - - #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP - // initialize inference structures - const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); - LBPInference inference; { - inference.SetNumNodes(faces.size()); - inference.SetSmoothCost(SmoothnessPotts); - // inference.SetSmoothCost(SmoothnessLinear); - // inference.SetSmoothCost(NewSmoothness); + } + const float normQuality(hist.GetApproximatePermille(0.95f)); - EdgeOutIter ei, eie; - FOREACH(f, faces) { - for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { - ASSERT(f == (FIndex)ei->m_source); - const FIndex fAdj((FIndex)ei->m_target); - if (f < fAdj) // add edges only once - inference.SetNeighbors(f, fAdj); - } - // set costs for label 0 (undefined) - inference.SetDataCost((Label)0, f, MaxEnergy); + #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP + // initialize inference structures + const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); + LBPInference inference; + { + 
inference.SetNumNodes(faces.size()); + inference.SetSmoothCost(SmoothnessPotts); + // inference.SetSmoothCost(SmoothnessLinear); + // inference.SetSmoothCost(NewSmoothness); + + EdgeOutIter ei, eie; + FOREACH(f, faces) { + for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { + ASSERT(f == (FIndex)ei->m_source); + const FIndex fAdj((FIndex)ei->m_target); + if (f < fAdj) // add edges only once + inference.SetNeighbors(f, fAdj); } + // set costs for label 0 (undefined) + inference.SetDataCost((Label)0, f, MaxEnergy); } + } - /* - // set data costs for all labels (except label 0 - undefined) - FOREACH(f, facesDatas) { - const FaceDataArr& faceDatas = facesDatas[f]; - const size_t numViews = faceDatas.size(); - unsigned minViews=3; - float dataWeightFactor=2.0f; - // LOG_OUT() << "FaceViewSelection numViews=" << numViews << std::endl; - const float factor = (numViews < minViews) ? dataWeightFactor : 1.0f; - for (const FaceData& faceData: faceDatas) { - const Label label((Label)faceData.idxView+1); - const float normalizedQuality(faceData.quality>=normQuality ? 
1.f : faceData.quality/normQuality); - const float dataCost((1.f-normalizedQuality)*MaxEnergy * factor); - inference.SetDataCost(label, f, dataCost); - } + /* + for (const FaceDataArr& faceDatas : facesDatas) { + for (const FaceData& faceData : faceDatas) { + if (faceData.quality > maxQuality) + maxQuality = faceData.quality; } - //*/ - /* - FOREACH(f, facesDatas) { - const FaceDataArr& faceDatas = facesDatas[f]; - const size_t numViews = faceDatas.size(); - const unsigned minSingleView = 2; // 当可用视角<=5时强制单视图 - - // 当视角不足时,只保留最佳视角 - // if (numViews <= minSingleView) { - if (true) { - // 找到质量最高的视角 - float maxQuality = 0; - IIndex bestView = NO_ID; - for (const FaceData& fd : faceDatas) { - if (fd.quality > maxQuality) { - maxQuality = fd.quality; - bestView = fd.idxView; + } + for (const FaceDataArr& faceDatas : facesDatas) { + for (const FaceData& faceData : faceDatas) + hist.Add(faceData.quality); + } + */ + + FOREACH(f, faces) { + if (labels[f] != NO_ID) { + const Label assignedLabel = (Label)(labels[f] + 1); + inference.SetDataCost(assignedLabel, f, 0); + } + } + + FOREACH(f, facesDatas) + { + if (labels[f] != NO_ID) + continue; + + const FaceDataArr& faceDatas = facesDatas[f]; + const size_t numViews = faceDatas.size(); + const unsigned minSingleView = 1; // 与虚拟面模式相同的阈值 + + bool bInvalidFacesRelative = false; + IIndex invalidView; + float invalidQuality; + + { + + std::vector> sortedViews; + sortedViews.reserve(faceDatas.size()); + for (const FaceData& fd : faceDatas) + { + + if (fd.bInvalidFacesRelative) + { + bInvalidFacesRelative = true; + // sortedViews.emplace_back(fd.quality, fd.idxView); + invalidView = fd.idxView; + invalidQuality = fd.quality; + } + else + { + // if (fd.quality<=999.0) + { + sortedViews.emplace_back(fd.quality, fd.idxView); + // printf("1fd.quality=%f\n", fd.quality); } + // else + // printf("2fd.quality=%f\n", fd.quality); } - // 只设置最佳视角的数据项,其他设为MaxEnergy - for (const FaceData& fd : faceDatas) { - const Label label = (Label)fd.idxView 
+ 1; - // const float cost = (fd.idxView == bestView) ? - // (1.f - fd.quality/normQuality) * MaxEnergy : - // MaxEnergy; - const float cost = (fd.idxView == bestView) ? - (1.f - fd.quality/normQuality) * MaxEnergy : - 0; + } + + std::sort(sortedViews.begin(), sortedViews.end(), + [](const auto& a, const auto& b) { return a.first > b.first; }); + // 设置数据成本:最佳视角成本最低,其他按质量排序递增 + const float baseCostScale = 0.1f; // 基础成本缩放系数 + const float costStep = 0.3f; // 相邻视角成本增量 + + for (const auto& image : images) + { + // printf("image name=%s\n", image.name.c_str()); + } + + if (bInvalidFacesRelative && sortedViews.size() == 0) + { + // const Label label = (Label)sortedViews[0].second + 1; + const Label label = (Label)invalidView + 1; + float cost = (1.f - invalidQuality / normQuality) * MaxEnergy; + // float cost = 0; + inference.SetDataCost(label, f, cost); + continue; + } + + // printf("sortedViews size=%d\n", sortedViews.size()); + for (size_t i = 0; i < sortedViews.size(); ++i) + { + const Label label = (Label)sortedViews[i].second + 1; + float cost; + + std::string strPath = images[label-1].name; + size_t lastSlash = strPath.find_last_of("/\\"); + if (lastSlash == std::string::npos) lastSlash = 0; // 若无分隔符,从头开始 + else lastSlash++; // 跳过分隔符 + + // 查找扩展名分隔符 '.' 
的位置 + size_t lastDot = strPath.find_last_of('.'); + if (lastDot == std::string::npos) lastDot = strPath.size(); // 若无扩展名,截到末尾 + + // 截取文件名(不含路径和扩展名) + std::string strName = strPath.substr(lastSlash, lastDot - lastSlash); + + if (i == 0) { + // if (true) { + // 最佳视角 + // cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy * baseCostScale; + cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy; + // cost = 0; + inference.SetDataCost(label, f, cost); + } else { + // 其他视角:成本随排名线性增加 + int stepIndex = i; + // if (i > 3) + // stepIndex = i - 3; + cost = MaxEnergy * (baseCostScale + costStep * stepIndex); + // 确保成本不超过MaxEnergy + cost = std::min(cost, MaxEnergy); + // cost = MaxEnergy; inference.SetDataCost(label, f, cost); } - } - else { - // 正常处理多视角情况 - for (const FaceData& faceData : faceDatas) { - const Label label = (Label)faceData.idxView + 1; - const float normalizedQuality = faceData.quality/normQuality; - const float dataCost = (1.f - normalizedQuality) * MaxEnergy; - inference.SetDataCost(label, f, dataCost); + } + + } + } + + // assign the optimal view (label) to each face + // (label 0 is reserved as undefined) + // inference.Optimize(); + + // extract resulting labeling + FOREACH(l, labels) + { + if (labels[l] != NO_ID) + continue; + const Label label(inference.GetLabel(l)); + ASSERT(label < images.size()+1); + if (label > 0) + { + labels[l] = label-1; + labelsInvalid[l] = labels[l]; + } + } + #endif + } + + if (!bUseVirtualFaces) + { + // assign the best view to each face + labels.resize(faces.size()); + { + // normalize quality values + float maxQuality(0); + for (const FaceDataArr& faceDatas: facesDatas) { + for (const FaceData& faceData: faceDatas) + if (maxQuality < faceData.quality) + maxQuality = faceData.quality; + } + Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); + for (const FaceDataArr& faceDatas: facesDatas) { + for (const FaceData& faceData: faceDatas) + hist.Add(faceData.quality); + } + const float 
normQuality(hist.GetApproximatePermille(0.95f)); + + #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP + // initialize inference structures + const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); + LBPInference inference; { + inference.SetNumNodes(faces.size()); + inference.SetSmoothCost(SmoothnessPotts); + // inference.SetSmoothCost(SmoothnessLinear); + // inference.SetSmoothCost(NewSmoothness); + + EdgeOutIter ei, eie; + FOREACH(f, faces) { + for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { + ASSERT(f == (FIndex)ei->m_source); + const FIndex fAdj((FIndex)ei->m_target); + if (f < fAdj) // add edges only once + inference.SetNeighbors(f, fAdj); } + // set costs for label 0 (undefined) + inference.SetDataCost((Label)0, f, MaxEnergy); } } - //*/ + //* for (const FaceDataArr& faceDatas : facesDatas) { for (const FaceData& faceData : faceDatas) { @@ -5285,61 +6058,6 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT // 截取文件名(不含路径和扩展名) std::string strName = strPath.substr(lastSlash, lastDot - lastSlash); - // printf("scene.is_face_visible %d, %d\n", label-1, f); - // printf(images[label]); - // printf("name=%s\n", images[label-1].name.c_str()); - // if (!scene.is_face_visible(strName.c_str(), f)) - // continue; - - /* - // if (strName=="122_2") // 7613212046 11_2 122_2 - // if (strName=="63_2" || strName=="62_2") // 274658 123_2 12_2 63_2 62_2 - // if (strName=="72_2" || strName=="82_2" || strName=="83_2") // 274658 72_2 12_2 112_2 14_2 82_2 83_2 - // if (strName=="12_2" || strName=="112_2" || strName=="14_2" || - // strName=="82_2" || strName=="83_2" || strName=="123_2") - if (strName=="82_2") - // if (strName=="122_2") // 7613212046 11_2 122_2 - // 274658 72_2 12_2 112_2 14_2 82_2 83_2 123_2 - { - // printf("name=%s\n", images[label-1].name.c_str()); - cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy; - inference.SetDataCost(label, f, cost); - 
continue; - } - //*/ - - /* - // 添加临界区保护 - bool skipFace = false; - #ifdef TEXOPT_USE_OPENMP - #pragma omp critical(invalid_faces_access) - #endif - std::lock_guard lock(*scene.mesh.invalidFaces.mtx); - bool invalidFace = (scene.mesh.invalidFacesAll[label].data.find(f) != scene.mesh.invalidFacesAll[label].data.end()); - if (invalidFace) - { - if (i < 1) - {// 最佳视角 - cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy; - // cost = 0; - inference.SetDataCost(label, f, cost); - } - else - { - // 其他视角:成本随排名线性增加 - int stepIndex = i; - // if (i > 3) - // stepIndex = i - 3; - cost = MaxEnergy * (baseCostScale + costStep * stepIndex); - // 确保成本不超过MaxEnergy - cost = std::min(cost, MaxEnergy); - // cost = MaxEnergy; - inference.SetDataCost(label, f, cost); - } - continue; - } - //*/ - if (i == 0) { // if (true) { // 最佳视角 @@ -5359,22 +6077,7 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT inference.SetDataCost(label, f, cost); } } - // float maxQualityLocal = 0; - // IIndex bestView = NO_ID; - //for (const FaceData& fd : faceDatas) { - // if (fd.quality > maxQualityLocal) { - // maxQualityLocal = fd.quality; - // bestView = fd.idxView; - // } - //} - // for (const FaceData& fd : faceDatas) { - // const Label label = (Label)fd.idxView + 1; - // // 核心修改:仅最优视角用质量计算成本,其他视角设为MaxEnergy - // const float cost = (fd.idxView == bestView) - // ? 
(1.f - fd.quality / normQuality) * MaxEnergy - // : MaxEnergy; - // inference.SetDataCost(label, f, cost); - // } + } else { for (const FaceData& fd : faceDatas) { const Label label = (Label)fd.idxView + 1; @@ -5525,11 +6228,24 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT texturePatch.label = NO_ID; texturePatches.back().faces.Insert(f); } else { - if (texturePatch.faces.empty()) { - texturePatch.label = label; - texturePatch.faces.reserve(sizes[c]); + + if ((labelsInvalid[f] != NO_ID) && false) + { + if (texturePatch.faces.empty()) { + texturePatch.label = label; + // texturePatch.faces.reserve(sizes[c]); + texturePatch.faces.reserve(sizes[c]); + } + texturePatch.faces = {f}; + } + else + { + if (texturePatch.faces.empty()) { + texturePatch.label = label; + texturePatch.faces.reserve(sizes[c]); + } + texturePatch.faces.Insert(f); } - texturePatch.faces.Insert(f); } } // remove all patches with invalid label (except the last one) @@ -5574,10 +6290,10 @@ bool MeshTexture::FaceViewSelection4( unsigned minCommonCameras, float fOutlierT // create texture patches { - printf("FaceViewSelection3 1 scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size()); + // printf("FaceViewSelection3 1 scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size()); // compute face normals and smoothen them scene.mesh.SmoothNormalFaces(); - printf("FaceViewSelection3 2 scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size()); + // printf("FaceViewSelection3 2 scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size()); // list all views for each face FaceDataViewArr facesDatas; @@ -5964,6 +6680,10 @@ void MeshTexture::CreateSeamVertices() for (const PairIdx& edge: seamEdges) { // store edge for the later seam optimization ASSERT(edge.i < edge.j); + + // if (labelsInvalid[edge.i] != NO_ID || labelsInvalid[edge.j] != NO_ID ) + // continue; + const uint32_t idxPatch0(mapIdxPatch[components[edge.i]]); const uint32_t 
idxPatch1(mapIdxPatch[components[edge.j]]); ASSERT(idxPatch0 != idxPatch1 || idxPatch0 == numPatches); @@ -6017,10 +6737,24 @@ void MeshTexture::GlobalSeamLeveling3() { ASSERT(!seamVertices.empty()); const unsigned numPatches(texturePatches.size()-1); + + // Create a boolean array to mark invalid vertices + BoolArr vertexInvalid(vertices.size()); + vertexInvalid.Memset(false); + FOREACH(f, faces) { + if (labelsInvalid[f] != NO_ID) { + const Face& face = faces[f]; + for (int v=0; v<3; ++v) + vertexInvalid[face[v]] = true; + } + } + // find the patch ID for each vertex PatchIndices patchIndices(vertices.size()); patchIndices.Memset(0); FOREACH(f, faces) { + // if (labelsInvalid[f] != NO_ID) + // continue; const uint32_t idxPatch(mapIdxPatch[components[f]]); const Face& face = faces[f]; for (int v=0; v<3; ++v) @@ -6077,8 +6811,10 @@ void MeshTexture::GlobalSeamLeveling3() const uint32_t idxPatchAdj(itVAdj); if (idxPatch == idxPatchAdj) { const MatIdx colAdj(vertpatch2rows[vAdj].at(idxPatchAdj)); - rows.emplace_back(rowsGamma, col, lambda); - rows.emplace_back(rowsGamma, colAdj, -lambda); + float currentLambda = (vertexInvalid[v] || vertexInvalid[vAdj]) ? 
0.01f : 0.1f; + // float currentLambda = 1.0f; + rows.emplace_back(rowsGamma, col, currentLambda); + rows.emplace_back(rowsGamma, colAdj, -currentLambda); ++rowsGamma; } } @@ -6816,6 +7552,11 @@ void MeshTexture::GenerateTexture(bool bGlobalSeamLeveling, bool bLocalSeamLevel // const unsigned minPatchSize = 20; RectsBinPack::RectWIdxArr unplacedRects(texturePatches.size()); FOREACH(i, texturePatches) { + if (texturePatches[i].label == NO_ID) { + // 将无效面片区域填充为绿色 + // texturesDiffuse[i](texturePatches[i].rect).setTo(cv::Scalar(0, 255, 0)); // BGR格式,绿色 + // continue; + } // LOG_OUT() << "Third loop completed" << std::endl; if (maxTextureSize > 0 && (texturePatches[i].rect.width > maxTextureSize || texturePatches[i].rect.height > maxTextureSize)) { @@ -6871,6 +7612,7 @@ void MeshTexture::GenerateTexture(bool bGlobalSeamLeveling, bool bLocalSeamLevel if (textureSize == maxTextureSize || unplacedRects.empty()) { // create texture image placedRects.emplace_back(std::move(newPlacedRects)); + // Pixel8U colEmpty2=Pixel8U(0,0,255); texturesDiffuse.emplace_back(textureSize, textureSize).setTo(cv::Scalar(colEmpty.b, colEmpty.g, colEmpty.r)); textureSize = 0; } else { @@ -6909,16 +7651,49 @@ void MeshTexture::GenerateTexture(bool bGlobalSeamLeveling, bool bLocalSeamLevel patch = patch.t(); x = 1; y = 0; } + patch.copyTo(texturesDiffuse[idxTexture](rect)); } else { + //* auto it = texturePatch.faces.begin(); while (it != texturePatch.faces.end()) { emptyFaceIndexes.push_back(*it); ++it; } + //*/ + /* + // 处理无效贴片:使用备用纹理 + if (alternativeTexture != nullptr) { + // 使用备用纹理进行采样 + cv::Mat patch(rect.size(), CV_8UC3); + for (int r = 0; r < patch.rows; ++r) { + for (int c = 0; c < patch.cols; ++c) { + // 计算UV坐标:将像素位置映射到备用纹理的UV空间 + float u = (float)c / patch.cols; + float v = (float)r / patch.rows; + // 从备用纹理中采样 + int xSrc = static_cast(u * alternativeTexture->width()); + int ySrc = static_cast(v * alternativeTexture->height()); + xSrc = std::min(std::max(xSrc, 0), 
alternativeTexture->width() - 1); + ySrc = std::min(std::max(ySrc, 0), alternativeTexture->height() - 1); + Pixel8U color = (*alternativeTexture)(ySrc, xSrc); + patch.at(r, c) = color; + } + } + // Pixel8U colEmpty2=Pixel8U(0,0,255); + // cv::Mat patch2(rect.size(), CV_8UC3, cv::Scalar(colEmpty2.b, colEmpty2.g, colEmpty2.r)); + // patch2.copyTo(texturesDiffuse[idxTexture](rect)); + patch.copyTo(texturesDiffuse[idxTexture](rect)); + } else { + // 没有备用纹理,使用默认颜色 + // Pixel8U colEmpty2=Pixel8U(0,0,255); + cv::Mat patch(rect.size(), CV_8UC3, cv::Scalar(colEmpty.b, colEmpty.g, colEmpty.r)); + patch.copyTo(texturesDiffuse[idxTexture](rect)); + } + */ } // compute final texture coordinates const TexCoord offset(rect.tl()); @@ -7285,6 +8060,17 @@ void MeshTexture::LocalSeamLeveling() ASSERT(!seamVertices.empty()); const unsigned numPatches(texturePatches.size()-1); + // Create a boolean array to mark invalid vertices + BoolArr vertexInvalid(vertices.size()); + vertexInvalid.Memset(false); + FOREACH(f, faces) { + if (labelsInvalid[f] != NO_ID) { + const Face& face = faces[f]; + for (int v=0; v<3; ++v) + vertexInvalid[face[v]] = true; + } + } + // adjust texture patches locally, so that the border continues smoothly inside the patch #ifdef TEXOPT_USE_OPENMP #pragma omp parallel for schedule(dynamic) @@ -7294,6 +8080,24 @@ void MeshTexture::LocalSeamLeveling() #endif const uint32_t idxPatch((uint32_t)i); const TexturePatch& texturePatch = texturePatches[idxPatch]; + + // Check if this texture patch contains any invalid vertices + bool hasInvalidVertex = false; + for (const FIndex idxFace: texturePatch.faces) { + const Face& face = faces[idxFace]; + for (int v=0; v<3; ++v) { + if (vertexInvalid[face[v]]) { + hasInvalidVertex = true; + break; + } + } + if (hasInvalidVertex) break; + } + + // Set bias based on vertex validity: 0.01 if any vertex is invalid, else 1 + const float bias = hasInvalidVertex ? 
0.1f : 1.0f; + // const float bias = 1.0f; + // extract image const Image8U3& image0(images[texturePatch.label].image); Image32F3 image, imageOrg; @@ -7310,6 +8114,8 @@ void MeshTexture::LocalSeamLeveling() } } data{mask}; for (const FIndex idxFace: texturePatch.faces) { + // if (labelsInvalid[idxFace] != NO_ID) + // continue; const TexCoord* tri = faceTexcoords.data()+idxFace*3; ColorMap::RasterizeTriangle(tri[0], tri[1], tri[2], data); } @@ -7397,7 +8203,7 @@ void MeshTexture::LocalSeamLeveling() // keep only the exterior tripe of the given size ProcessMask(mask, 20); // compute texture patch blending - PoissonBlending(imageOrg, image, mask); + PoissonBlending(imageOrg, image, mask, bias); // apply color correction to the patch image cv::Mat imagePatch(image0(texturePatch.rect)); for (int r=0; r>& visible_faces_map, + std::unordered_set& face_visible_relative, + std::map>& edge_faces_map, + std::map>& delete_edge_faces_map, + std::string& basePath) { + // 保存 visible_faces_map + std::ofstream mapFile(basePath + "_visible_faces_map.txt"); + if (mapFile.is_open()) { + for (const auto& entry : visible_faces_map) { + mapFile << entry.first; + for (int face : entry.second) { + mapFile << " " << face; + } + mapFile << "\n"; + } + mapFile.close(); + } + + // 保存 face_visible_relative + std::ofstream relativeFile(basePath + "_face_visible_relative.txt"); + if (relativeFile.is_open()) { + for (int face : face_visible_relative) { + relativeFile << face << "\n"; + } + relativeFile.close(); + } + + std::ofstream mapFile2(basePath + "_edge_faces_map.txt"); + if (mapFile2.is_open()) { + for (const auto& entry : edge_faces_map) { + mapFile2 << entry.first; + for (int face : entry.second) { + mapFile2 << " " << face; + } + mapFile2 << "\n"; + } + mapFile2.close(); + } + + std::ofstream mapFile3(basePath + "_delete_edge_faces_map.txt"); + if (mapFile3.is_open()) { + for (const auto& entry : delete_edge_faces_map) { + mapFile3 << entry.first; + for (int face : entry.second) { + 
mapFile3 << " " << face; + } + mapFile3 << "\n"; + } + mapFile3.close(); + } +} + +// 从文件加载遮挡数据 +bool Scene::LoadVisibleFacesData(std::map>& visible_faces_map, + std::unordered_set& face_visible_relative, + std::map>& edge_faces_map, + std::map>& delete_edge_faces_map, + std::string& basePath) { + std::ifstream mapFile(basePath + "_visible_faces_map.txt"); + if (!mapFile.is_open()) { + return false; + } + + std::string line; + while (std::getline(mapFile, line)) { + std::istringstream iss(line); + std::string image_name; + iss >> image_name; + std::unordered_set faces; + int face_index; + while (iss >> face_index) { + faces.insert(face_index); + } + visible_faces_map[image_name] = faces; + } + mapFile.close(); + + std::ifstream relativeFile(basePath + "_face_visible_relative.txt"); + if (!relativeFile.is_open()) { + return false; + } + + while (std::getline(relativeFile, line)) { + int face_index = std::stoi(line); + face_visible_relative.insert(face_index); + } + relativeFile.close(); + + std::ifstream mapFile2(basePath + "_edge_faces_map.txt"); + if (!mapFile2.is_open()) { + return false; + } + + while (std::getline(mapFile2, line)) { + std::istringstream iss(line); + std::string image_name; + iss >> image_name; + std::unordered_set faces; + int face_index; + while (iss >> face_index) { + faces.insert(face_index); + } + edge_faces_map[image_name] = faces; + } + mapFile2.close(); + + std::ifstream mapFile3(basePath + "_delete_edge_faces_map.txt"); + if (!mapFile3.is_open()) { + return false; + } + + while (std::getline(mapFile3, line)) { + std::istringstream iss(line); + std::string image_name; + iss >> image_name; + std::unordered_set faces; + int face_index; + while (iss >> face_index) { + faces.insert(face_index); + } + delete_edge_faces_map[image_name] = faces; + } + mapFile3.close(); + + return true; +} + // texture mesh // - minCommonCameras: generate texture patches using virtual faces composed of coplanar triangles sharing at least this number of views (0 
- disabled, 3 - good value) // - fSharpnessWeight: sharpness weight to be applied on the texture (0 - disabled, 0.5 - good value) @@ -7691,6 +8625,23 @@ bool Scene::TextureMesh(unsigned nResolutionLevel, unsigned nMinResolution, unsi MeshTexture texture(*this, nResolutionLevel, nMinResolution); printf("baseFileName=%s\n", baseFileName.c_str()); + + /* + std::filesystem::path path(baseFileName.c_str()); + std::string parentPath = path.parent_path().string(); // 获取父目录 + + String altTexPath = String(parentPath) + "/mesh_material_0_map_Kd2.png"; + printf("altTexPath=%s\n", altTexPath.c_str()); + // 加载备用纹理 + Image8U3 altTex; + if (!altTex.Load(altTexPath)) { + // 如果加载失败,可以输出警告,但不中断流程 + DEBUG_EXTRA("Warning: Failed to load alternative texture mesh_material_0_map_Kd2.png"); + } else { + texture.alternativeTexture = &altTex; + } + //*/ + std::string id; // 1. 查找最后一个 '/' 的位置 @@ -7715,65 +8666,120 @@ bool Scene::TextureMesh(unsigned nResolutionLevel, unsigned nMinResolution, unsi } printf("id=%s\n", id.c_str()); - #ifdef MASK_FACE_OCCLUSION - // 创建遮挡数据 - try { - py::scoped_interpreter guard{}; // 自动管理解释器生命周期 - py::module_ sys = py::module_::import("sys"); - // 设置命令行参数(模拟Python的argparse) - py::list argv; - argv.append("program_name"); - argv.append("--id"); - // argv.append("274658"); // 274658 7613212046 - argv.append(id); - // argv.append("--mask_image"); - // argv.append("63_2"); - sys.attr("argv") = argv; + std::string basePath = ""; + size_t lastSlash = baseFileName.find_last_of('/'); + size_t secondLastSlash = baseFileName.find_last_of('/', lastSlash - 1); + if (secondLastSlash == std::string::npos) + basePath = baseFileName; + basePath = baseFileName.substr(0, secondLastSlash + 1); - py::print(sys.attr("version")); // 打印Python版本 - sys.attr("path").attr("append")("/home/algo/.conda/envs/py310_pyt210/lib/python3.10/site-packages"); - sys.attr("path").attr("append")("/home/algo/Documents/openMVS/openMVS/libs/MVS"); - // 调用自定义函数 - py::module_ mymodule = 
py::module_::import("mask_face_occlusion"); + // printf("basePath=%s\n", basePath.c_str()); - // 获取ModelProcessor类 - py::object ModelProcessor = mymodule.attr("ModelProcessor"); +#ifdef CACHE_MASK + if (!LoadVisibleFacesData(visible_faces_map, face_visible_relative, edge_faces_map, delete_edge_faces_map, basePath)) +#endif - py::object processor = ModelProcessor(); - py::dict result = processor.attr("process")().cast(); + { + // 创建遮挡数据 + try { + py::scoped_interpreter guard{}; // 自动管理解释器生命周期 + py::module_ sys = py::module_::import("sys"); + + // 设置命令行参数(模拟Python的argparse) + py::list argv; + argv.append("program_name"); + argv.append("--id"); + // argv.append("274658"); // 274658 7613212046 + argv.append(id); + // argv.append("--mask_image"); + // argv.append("63_2"); + sys.attr("argv") = argv; + + py::print(sys.attr("version")); // 打印Python版本 + sys.attr("path").attr("append")("/home/algo/.conda/envs/py310_pyt210/lib/python3.10/site-packages"); + sys.attr("path").attr("append")("/home/algo/Documents/openMVS/openMVS/libs/MVS"); + // 调用自定义函数 + py::module_ mymodule = py::module_::import("mask_face_occlusion"); + + // 获取ModelProcessor类 + py::object ModelProcessor = mymodule.attr("ModelProcessor"); + + py::object processor = ModelProcessor(); + py::dict result = processor.attr("process")().cast(); + + py::dict dict1; + py::dict dict2; + if (result.contains("result1") && result.contains("result2")) { + dict1 = result["result1"].cast(); + dict2 = result["result2"].cast(); + } + py::dict dict3; + if (result.contains("result3")) { + dict3 = result["result3"].cast(); + } + + printf("dict1 size=%d, dict2 size=%d, dict3 size=%d\n", dict1.size(), dict2.size(), dict3.size()); + + // 处理返回的可见面字典 + for (auto item : dict1) { + std::string image_name = item.first.cast(); + printf("dict1 mask image name=%s\n", image_name.c_str()); + py::list visible_faces = item.second.cast(); + + std::unordered_set face_set; + for (auto face : visible_faces) { + face_set.insert(face.cast()); + } + 
visible_faces_map[image_name] = face_set; + } - printf("result size=%d\n", result.size()); + for (const auto& entry : visible_faces_map) + { + face_visible_relative.insert(entry.second.begin(), entry.second.end()); + } - // 处理返回的可见面字典 - for (auto item : result) { - std::string image_name = item.first.cast(); - printf("mask image name=%s\n", image_name.c_str()); - py::list visible_faces = item.second.cast(); - - std::unordered_set face_set; - for (auto face : visible_faces) { - face_set.insert(face.cast()); + for (auto item : dict2) { + std::string image_name = item.first.cast(); + printf("dict2 mask image name=%s\n", image_name.c_str()); + py::list edge_faces = item.second.cast(); + + std::unordered_set face_set; + for (auto face : edge_faces) { + face_set.insert(face.cast()); + } + edge_faces_map[image_name] = face_set; } - visible_faces_map[image_name] = face_set; - } - for (const auto& entry : visible_faces_map) - { - face_visible_relative.insert(entry.second.begin(), entry.second.end()); + for (auto item : dict3) { + std::string image_name = item.first.cast(); + printf("dict3 mask image name=%s\n", image_name.c_str()); + py::list delete_edge_faces = item.second.cast(); + + std::unordered_set face_set; + for (auto face : delete_edge_faces) { + face_set.insert(face.cast()); + } + delete_edge_faces_map[image_name] = face_set; + } + +#ifdef CACHE_MASK + SaveVisibleFacesData(visible_faces_map, face_visible_relative, edge_faces_map, delete_edge_faces_map, basePath); +#endif + } + catch (const py::error_already_set &e) { + std::cerr << "Python error: " << e.what() << std::endl; + // 获取详细的Python错误信息 + PyErr_Print(); + return 1; } - } - catch (const py::error_already_set &e) { - std::cerr << "Python error: " << e.what() << std::endl; - // 获取详细的Python错误信息 - PyErr_Print(); - return 1; - } - catch (const std::exception &e) { - std::cerr << "C++ error: " << e.what() << std::endl; - return 1; - } + catch (const std::exception &e) { + std::cerr << "C++ error: " << e.what() << 
std::endl; + return 1; + } + } + #endif // assign the best view to each face @@ -7842,6 +8848,48 @@ bool Scene::TextureMesh(unsigned nResolutionLevel, unsigned nMinResolution, unsi DEBUG_EXTRA("Generating texture atlas and image completed: %u patches, %u image size, %u textures (%s)", texture.texturePatches.size(), mesh.texturesDiffuse[0].width(), mesh.texturesDiffuse.size(), TD_TIMER_GET_FMT().c_str()); } + #ifdef DISPLAY_DEMO + try { + py::scoped_interpreter guard{}; // 自动管理解释器生命周期 + py::module_ sys = py::module_::import("sys"); + + // 设置命令行参数(模拟Python的argparse) + py::list argv; + argv.append("program_name"); + argv.append("--id"); + argv.append(id); + argv.append("--mask"); + #ifdef MASK_FACE_OCCLUSION + argv.append(1); + #else + argv.append(0); + #endif + sys.attr("argv") = argv; + + py::print(sys.attr("version")); // 打印Python版本 + sys.attr("path").attr("append")("/home/algo/.conda/envs/py310_pyt210/lib/python3.10/site-packages"); + sys.attr("path").attr("append")("/home/algo/Documents/openMVS/openMVS/libs/MVS"); + // 调用自定义函数 + py::module_ mymodule = py::module_::import("display_demo"); + + // 获取ModelProcessor类 + py::object DisplayProcessor = mymodule.attr("DisplayProcessor"); + + py::object processor = DisplayProcessor(); + processor.attr("load_and_show")(); + } + catch (const py::error_already_set &e) { + std::cerr << "Python error: " << e.what() << std::endl; + // 获取详细的Python错误信息 + PyErr_Print(); + return 1; + } + catch (const std::exception &e) { + std::cerr << "C++ error: " << e.what() << std::endl; + return 1; + } +#endif + return true; } // TextureMesh @@ -7860,7 +8908,6 @@ bool Scene::is_face_visible(const std::string& image_name, int face_index) { bool Scene::is_face_visible_relative(int face_index) { - #ifndef MASK_FACE_OCCLUSION return true; #endif @@ -7868,6 +8915,51 @@ bool Scene::is_face_visible_relative(int face_index) return face_visible_relative.contains(face_index); } + +bool Scene::is_face_edge(const std::string& image_name, int face_index) { 
+ +#ifndef MASK_FACE_OCCLUSION + return true; +#endif + + auto it = edge_faces_map.find(image_name); + if (it != edge_faces_map.end()) { + // printf("is_face_edge %s, %d, %d\n", image_name.c_str(), it->second.size(), face_index); + + for (auto it2 = it->second.begin(); it2 != it->second.end(); ++it2) { + // std::cout << *it2 << " "; + } + // std::cout << std::endl; + + // if (it->second.find(face_index) != it->second.end()) + // printf("find is_face_edge %s, %d, %d\n", image_name.c_str(), it->second.size(), face_index); + return it->second.find(face_index) != it->second.end(); + } + return false; +} + +bool Scene::is_face_delete_edge(const std::string& image_name, int face_index) { + +#ifndef MASK_FACE_OCCLUSION + return true; +#endif + + auto it = delete_edge_faces_map.find(image_name); + if (it != delete_edge_faces_map.end()) { + // printf("is_face_delete_edge %s, %d, %d\n", image_name.c_str(), it->second.size(), face_index); + + for (auto it2 = it->second.begin(); it2 != it->second.end(); ++it2) { + // std::cout << *it2 << " "; + } + // std::cout << std::endl; + + // if (it->second.find(face_index) != it->second.end()) + // printf("find is_face_delete_edge %s, %d, %d\n", image_name.c_str(), it->second.size(), face_index); + return it->second.find(face_index) != it->second.end(); + } + return false; +} + void Scene::SegmentMeshBasedOnCurvature(Mesh::FaceIdxArr& regionMap, float curvatureThreshold) { // 确保网格数据有效 if (mesh.faces.empty() || mesh.vertices.empty() || diff --git a/libs/MVS/__pycache__/display_demo.cpython-310.pyc b/libs/MVS/__pycache__/display_demo.cpython-310.pyc new file mode 100644 index 0000000..298e967 Binary files /dev/null and b/libs/MVS/__pycache__/display_demo.cpython-310.pyc differ diff --git a/libs/MVS/__pycache__/mask_face_occlusion.cpython-310.pyc b/libs/MVS/__pycache__/mask_face_occlusion.cpython-310.pyc index 29ae81b..7e82d04 100644 Binary files a/libs/MVS/__pycache__/mask_face_occlusion.cpython-310.pyc and 
import open3d as o3d
import os
import argparse
import numpy as np
from PIL import Image

class DisplayProcessor:
    """Load the textured mesh produced for a given model id and display it
    in an Open3D window.

    Command line (read from sys.argv by the constructor):
        --id    model identifier (required); selects data/<id>/out.<id>[.nomask]
        --mask  1 to show the masked result, 0 (default) for the unmasked one
    """

    def __init__(self):

        # argv = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else []
        parser = argparse.ArgumentParser()

        parser.add_argument(
            "--id",
            required=True,
        )
        # BUG FIX: without type=int a command-line value arrives as the string
        # "1", so the comparison `mask == 1` below could never be true and the
        # masked output directory was never selected.
        parser.add_argument(
            "--mask",
            type=int,
            default=0
        )

        args = parser.parse_args()

        self.id = args.id
        mask = args.mask
        out_dir = f"out.{self.id}.nomask"
        if mask == 1:
            out_dir = f"out.{self.id}"

        self.mesh = None
        self.obj_path = f"/home/algo/Documents/openMVS/data/{self.id}/{out_dir}/mesh.obj"

        # BUG FIX: the constructor used to call self.load_and_show() here, but
        # every caller (the __main__ guard below and the embedding C++ code)
        # calls load_and_show() again right after construction, so the viewer
        # window opened twice. Construction is now side-effect free.

    def load_and_show(self):
        """Load self.obj_path and open an interactive viewer (press 'Q' to quit)."""

        # Load the mesh; failures are reported but do not raise.
        mesh = None
        try:
            mesh = o3d.io.read_triangle_mesh(self.obj_path, enable_post_processing=True)
            if not mesh.has_vertices():
                print(f"警告: 网格无有效顶点 - {self.obj_path}")
        except Exception as e:
            print(f"加载模型失败: {self.obj_path} - {e}")

        if not mesh:
            print("没有加载到任何模型,请检查错误信息")
        else:
            print("显示模型... (按'Q'退出)")

            try:
                from packaging import version
                o3d_version = version.parse(o3d.__version__)
                # draw_geometries keyword set differs across Open3D releases.
                if o3d_version >= version.parse("0.13.0"):
                    o3d.visualization.draw_geometries(
                        [mesh],
                        window_name="模型展示",
                        mesh_show_back_face=True,
                        mesh_show_wireframe=False
                    )
                else:
                    o3d.visualization.draw_geometries(
                        [mesh],
                        window_name="模型展示",
                        point_show_normal=False,
                        mesh_show_back_face=True
                    )
            except Exception as e:
                print(f"使用 draw_geometries 可视化失败: {e}")

# Entry point for standalone use.
if __name__ == "__main__":
    DisplayProcessor().load_and_show()
final_vertex_difference.tolist() return final_visible.tolist(), self._occlusion_expansion(final_occlusion, vertices) def _build_depth_pyramid2(self, depth_map, levels=4): @@ -319,6 +324,197 @@ class ModelProcessor: final_occlusion[valid_mask] = occlusion return final_visible, final_occlusion + + def _hierarchical_occlusion_test2(self, vertices_cam, depth_pyramid, intrinsics, img_size): + """层级式遮挡检测(安全版本)""" + fx, fy, cx, cy = intrinsics + height, width = img_size + + # 1. 过滤无效顶点 + valid_mask = vertices_cam[:, 2] > 1e-6 + vertices_valid = vertices_cam[valid_mask] + if len(vertices_valid) == 0: + return (np.zeros(len(vertices_cam), dtype=bool), + np.zeros(len(vertices_cam), dtype=bool), + np.zeros(len(vertices_cam))) # 返回空的深度差值数组 + + visible = np.zeros(len(vertices_valid), dtype=bool) + occlusion = np.zeros(len(vertices_valid), dtype=bool) + + # 用于存储每个像素点的深度范围(最小值和最大值) + pixel_depth_min = {} + pixel_depth_max = {} + + # 2. 层级检测 + for level in reversed(range(len(depth_pyramid))): + scale = 2 ** level + current_depth = depth_pyramid[level] + h, w = current_depth.shape + + # 安全构造内参矩阵 + K = np.array([ + [max(fx/(scale + 1e-6), 1e-6), 0, (cx - 0.5)/scale + 0.5], + [0, max(fy/(scale + 1e-6), 1e-6), (cy - 0.5)/scale + 0.5], + [0, 0, 1] + ], dtype=np.float32) + + # 投影计算 + uv_homo = (K @ vertices_valid.T).T + uv = uv_homo[:, :2] / uv_homo[:, 2:3] + + # 安全边界处理 + u = np.clip(uv[:, 0], 0.0, float(w-1)) + v = np.clip(uv[:, 1], 0.0, float(h-1)) + + # 转换为整数索引 + u_idx = np.clip(np.floor(u).astype(np.int32), 0, w-1) + v_idx = np.clip(np.floor(v).astype(np.int32), 0, h-1) + + # 采样深度值 + depth_vals = current_depth[v_idx, u_idx] + + # 只在最高分辨率层级(level=0)记录像素深度范围 + if level == 0: + for i in range(len(u_idx)): + pixel_key = (u_idx[i], v_idx[i]) + vertex_depth = vertices_valid[i, 2] + + # 更新像素的最小深度值 + if pixel_key not in pixel_depth_min or vertex_depth < pixel_depth_min[pixel_key]: + pixel_depth_min[pixel_key] = vertex_depth + + # 更新像素的最大深度值 + if pixel_key not in pixel_depth_max or 
vertex_depth > pixel_depth_max[pixel_key]: + pixel_depth_max[pixel_key] = vertex_depth + + # 深度比较 + level_tol = 0.0008 * (2 ** level) # 0.005 0.0008 + visible |= (vertices_valid[:, 2] <= (depth_vals + level_tol)) + occlusion |= (vertices_valid[:, 2] > (depth_vals + level_tol)) + + # 计算每个像素的深度差值(最大深度 - 最小深度) + pixel_depth_difference = {} + for pixel_key in pixel_depth_min: + if pixel_key in pixel_depth_max: + pixel_depth_difference[pixel_key] = pixel_depth_max[pixel_key] - pixel_depth_min[pixel_key] + + # 为每个顶点分配对应的像素点深度差值 + vertex_depth_difference = np.zeros(len(vertices_cam)) + if level == 0: # 确保我们记录了深度范围 + for i in range(len(vertices_valid)): + pixel_key = (u_idx[i], v_idx[i]) + if pixel_key in pixel_depth_difference: + # 找到原始顶点索引 + orig_idx = np.where(valid_mask)[0][i] + vertex_depth_difference[orig_idx] = pixel_depth_difference[pixel_key] + + # 3. 结果映射 + final_visible = np.zeros(len(vertices_cam), dtype=bool) + final_visible[valid_mask] = visible + + final_occlusion = np.zeros(len(vertices_cam), dtype=bool) + final_occlusion[valid_mask] = occlusion + + return final_visible, final_occlusion, vertex_depth_difference + + def _hierarchical_occlusion_test3(self, vertices_cam, depth_pyramid, intrinsics, img_size): + """层级式遮挡检测(安全版本)""" + fx, fy, cx, cy = intrinsics + height, width = img_size + + # 1. 过滤无效顶点 + valid_mask = vertices_cam[:, 2] > 1e-6 + vertices_valid = vertices_cam[valid_mask] + if len(vertices_valid) == 0: + return (np.zeros(len(vertices_cam), dtype=bool), + np.zeros(len(vertices_cam), dtype=bool), + {}) # 返回空的深度差值字典 + + visible = np.zeros(len(vertices_valid), dtype=bool) + occlusion = np.zeros(len(vertices_valid), dtype=bool) + + # 用于存储每个像素点的深度范围 + pixel_depth_range = {} + + # 用于存储每个顶点对应的像素坐标和深度差值 + vertex_pixel_info = {} + + # 2. 
层级检测 + for level in reversed(range(len(depth_pyramid))): + scale = 2 ** level + current_depth = depth_pyramid[level] + h, w = current_depth.shape + + # 安全构造内参矩阵 + K = np.array([ + [max(fx/(scale + 1e-6), 1e-6), 0, (cx - 0.5)/scale + 0.5], + [0, max(fy/(scale + 1e-6), 1e-6), (cy - 0.5)/scale + 0.5], + [0, 0, 1] + ], dtype=np.float32) + + # 投影计算 + uv_homo = (K @ vertices_valid.T).T + uv = uv_homo[:, :2] / uv_homo[:, 2:3] + + # 安全边界处理 + u = np.clip(uv[:, 0], 0.0, float(w-1)) + v = np.clip(uv[:, 1], 0.0, float(h-1)) + + # 转换为整数索引 + u_idx = np.clip(np.floor(u).astype(np.int32), 0, w-1) + v_idx = np.clip(np.floor(v).astype(np.int32), 0, h-1) + + # 采样深度值 + depth_vals = current_depth[v_idx, u_idx] + + # 记录每个像素点的深度范围(只在最高分辨率层级记录) + # if level == 0: # 只在原始分辨率层级记录 + if True: + for i in range(len(u_idx)): + vertex_idx = np.where(valid_mask)[0][i] # 获取原始顶点索引 + pixel_key = (u_idx[i], v_idx[i]) + + # 记录顶点对应的像素坐标 + vertex_pixel_info[vertex_idx] = pixel_key + + # 记录像素点的深度范围 + if pixel_key not in pixel_depth_range: + pixel_depth_range[pixel_key] = { + 'min': vertices_valid[i, 2], # 顶点深度 + 'max': vertices_valid[i, 2], # 顶点深度 + 'count': 1 + } + else: + pixel_depth_range[pixel_key]['min'] = min( + pixel_depth_range[pixel_key]['min'], vertices_valid[i, 2]) + pixel_depth_range[pixel_key]['max'] = max( + pixel_depth_range[pixel_key]['max'], vertices_valid[i, 2]) + pixel_depth_range[pixel_key]['count'] += 1 + + # 深度比较 + level_tol = 0.0008 * (2 ** level) # 0.005 0.0008 + visible |= (vertices_valid[:, 2] <= (depth_vals + level_tol)) + occlusion |= (vertices_valid[:, 2] > (depth_vals + level_tol)) + + # 计算每个像素点的深度差值 + pixel_depth_difference = {} + for pixel_key, depth_range in pixel_depth_range.items(): + pixel_depth_difference[pixel_key] = depth_range['max'] - depth_range['min'] + + # 为每个顶点分配对应的像素点深度差值 + vertex_depth_difference = np.zeros(len(vertices_cam)) + for vertex_idx, pixel_key in vertex_pixel_info.items(): + if pixel_key in pixel_depth_difference: + 
vertex_depth_difference[vertex_idx] = pixel_depth_difference[pixel_key] + + # 3. 结果映射 + final_visible = np.zeros(len(vertices_cam), dtype=bool) + final_visible[valid_mask] = visible + + final_occlusion = np.zeros(len(vertices_cam), dtype=bool) + final_occlusion[valid_mask] = occlusion + + return final_visible, final_occlusion, vertex_depth_difference def _occlusion_expansion(self, occlusion_mask, vertices, radius=0.0008): """基于空间哈希的快速遮挡扩展""" @@ -393,6 +589,7 @@ class ModelProcessor: R = self.qvec2rotmat(camera_data['qvec']).T eye = -R @ camera_data['tvec'] # eye = camera_data['tvec'] + # final_visible_list, final_occlusion_list, final_vertex_difference_list = self._compute_vertex_in_frustum( final_visible_list, final_occlusion_list = self._compute_vertex_in_frustum( camera_data['fx'], camera_data['fy'], camera_data['cx'], camera_data['cy'], @@ -443,11 +640,12 @@ class ModelProcessor: output_path = f"{self.asset_dir}/mesh_{self.id}_脸部遮挡判断.ply" o3d.io.write_triangle_mesh(output_path, self.mesh) print(f"Processing completed. 
Results saved to {output_path}") - """ + #""" # 获取三角形面片数组 triangles = np.asarray(self.mesh.triangles) face_visible_bitmap = np.zeros(len(triangles), dtype=bool) + # face_edge_bitmap = np.zeros(len(triangles), dtype=bool) # 遍历所有面片 for face_idx, face in enumerate(triangles): @@ -459,11 +657,266 @@ class ModelProcessor: final_visible_list[v2] ]) + # threshold = 0.5 + # face_edge_bitmap[face_idx] = all([ # any all + # final_vertex_difference_list[v0] < threshold, + # final_vertex_difference_list[v1] < threshold, + # final_vertex_difference_list[v2] < threshold + # ]) + # return face_visible_bitmap - # expanded_visibility = self._expand_face_visibility(face_visible_bitmap) - # return expanded_visibility - shrunk_visibility = self._shrink_face_visibility(face_visible_bitmap, 6) # 6 10 - return shrunk_visibility + shrunk_visibility = self._shrink_face_visibility(face_visible_bitmap, 6) # 6 10 + + # 16,13;13,16;16,16 + expanded_visibility = self._expand_face_visibility(face_visible_bitmap, 30) + shrunk_visibility2 = self._shrink_face_visibility(face_visible_bitmap, 50) + expanded_edge = expanded_visibility & ~shrunk_visibility2 + delete_edge = face_visible_bitmap & ~shrunk_visibility + + """ + # 创建顶点可见性映射(基于面片可见性) + vertex_visibility = np.zeros(len(self.mesh.vertices), dtype=bool) + # 遍历所有面片,将可见面片的顶点标记为可见 + for face_idx, face in enumerate(triangles): + # if expanded_edge[face_idx] and face_edge_bitmap[face_idx] : + if delete_edge[face_idx] : + vertex_visibility[face[0]] = True + vertex_visibility[face[1]] = True + vertex_visibility[face[2]] = True + + + vertices = np.asarray(self.mesh.vertices) + vertex_index_map = {tuple(v.tolist()): i for i, v in enumerate(vertices)} + + vertex_colors = np.asarray(self.mesh.vertex_colors) + if face_points==None: + for vertex_id, coord in enumerate(self.mesh.vertices): + if vertex_visibility[vertex_id]: + vertex_colors[vertex_id] = [1.0, 0.0, 0.0] + + # 保存最终模型 + output_path = f"{self.asset_dir}/mesh_{self.id}_edge.ply" + 
o3d.io.write_triangle_mesh(output_path, self.mesh) + print(f"Processing completed. Results saved to {output_path}") + else: + list_id = [] + # sorted_verts = self.sort_vertices(self.mesh.vertices) + sorted_verts =sorted( + (tuple(v.tolist()) for v in vertices), + key=lambda v: (v[0], v[1], v[2]) + ) + dict_s_o = {} + dict_o_s = {} + for sorted_idx, sorted_v in enumerate(sorted_verts): + original_idx = vertex_index_map[sorted_v] + dict_s_o[sorted_idx] = original_idx + dict_o_s[original_idx] = sorted_idx + + for vertex_id, coord in enumerate(self.mesh.vertices): + # print(vertex_id, coord) + if vertex_visibility[vertex_id]: + if dict_o_s[vertex_id] in face_points: + list_id.append(dict_o_s[vertex_id]) + vertex_colors[vertex_id] = [1.0, 0.0, 0.0] + + # 保存最终模型 + output_path = f"{self.asset_dir}/mesh_{self.id}_edge.ply" + o3d.io.write_triangle_mesh(output_path, self.mesh) + print(f"Processing completed. Results saved to {output_path}") + #""" + + """ + # 创建顶点可见性映射(基于面片可见性) + vertex_visibility = np.zeros(len(self.mesh.vertices), dtype=bool) + # 遍历所有面片,将可见面片的顶点标记为可见 + for face_idx, face in enumerate(triangles): + if expanded_visibility[face_idx]: + vertex_visibility[face[0]] = True + vertex_visibility[face[1]] = True + vertex_visibility[face[2]] = True + + + vertices = np.asarray(self.mesh.vertices) + vertex_index_map = {tuple(v.tolist()): i for i, v in enumerate(vertices)} + + vertex_colors = np.asarray(self.mesh.vertex_colors) + if face_points==None: + for vertex_id, coord in enumerate(self.mesh.vertices): + if vertex_visibility[vertex_id]: + vertex_colors[vertex_id] = [1.0, 0.0, 0.0] + + # 保存最终模型 + output_path = f"{self.asset_dir}/mesh_{self.id}_expanded.ply" + o3d.io.write_triangle_mesh(output_path, self.mesh) + print(f"Processing completed. 
Results saved to {output_path}") + else: + list_id = [] + # sorted_verts = self.sort_vertices(self.mesh.vertices) + sorted_verts =sorted( + (tuple(v.tolist()) for v in vertices), + key=lambda v: (v[0], v[1], v[2]) + ) + dict_s_o = {} + dict_o_s = {} + for sorted_idx, sorted_v in enumerate(sorted_verts): + original_idx = vertex_index_map[sorted_v] + dict_s_o[sorted_idx] = original_idx + dict_o_s[original_idx] = sorted_idx + + for vertex_id, coord in enumerate(self.mesh.vertices): + # print(vertex_id, coord) + if vertex_visibility[vertex_id]: + if dict_o_s[vertex_id] in face_points: + list_id.append(dict_o_s[vertex_id]) + vertex_colors[vertex_id] = [1.0, 0.0, 0.0] + + # 保存最终模型 + output_path = f"{self.asset_dir}/mesh_{self.id}_expanded.ply" + o3d.io.write_triangle_mesh(output_path, self.mesh) + print(f"Processing completed. Results saved to {output_path}") + #""" + + """ + # 创建顶点可见性映射(基于面片可见性) + vertex_visibility = np.zeros(len(self.mesh.vertices), dtype=bool) + # 遍历所有面片,将可见面片的顶点标记为可见 + for face_idx, face in enumerate(triangles): + if shrunk_visibility2[face_idx]: + vertex_visibility[face[0]] = True + vertex_visibility[face[1]] = True + vertex_visibility[face[2]] = True + + + vertices = np.asarray(self.mesh.vertices) + vertex_index_map = {tuple(v.tolist()): i for i, v in enumerate(vertices)} + + vertex_colors = np.asarray(self.mesh.vertex_colors) + if face_points==None: + for vertex_id, coord in enumerate(self.mesh.vertices): + if vertex_visibility[vertex_id]: + vertex_colors[vertex_id] = [1.0, 0.0, 0.0] + + # 保存最终模型 + output_path = f"{self.asset_dir}/mesh_{self.id}_shrunk.ply" + o3d.io.write_triangle_mesh(output_path, self.mesh) + print(f"Processing completed. 
Results saved to {output_path}") + else: + list_id = [] + # sorted_verts = self.sort_vertices(self.mesh.vertices) + sorted_verts =sorted( + (tuple(v.tolist()) for v in vertices), + key=lambda v: (v[0], v[1], v[2]) + ) + dict_s_o = {} + dict_o_s = {} + for sorted_idx, sorted_v in enumerate(sorted_verts): + original_idx = vertex_index_map[sorted_v] + dict_s_o[sorted_idx] = original_idx + dict_o_s[original_idx] = sorted_idx + + for vertex_id, coord in enumerate(self.mesh.vertices): + # print(vertex_id, coord) + if vertex_visibility[vertex_id]: + if dict_o_s[vertex_id] in face_points: + list_id.append(dict_o_s[vertex_id]) + vertex_colors[vertex_id] = [1.0, 0.0, 0.0] + + # 保存最终模型 + output_path = f"{self.asset_dir}/mesh_{self.id}_shrunk.ply" + o3d.io.write_triangle_mesh(output_path, self.mesh) + print(f"Processing completed. Results saved to {output_path}") + #""" + + return shrunk_visibility, expanded_edge, delete_edge + + def _flag_contour(self, camera_data, face_points): + """标记可见顶点""" + vertex_visible = [] + vertex_occlusion = [] + depth_images = [] + + render = o3d.visualization.rendering.OffscreenRenderer(camera_data['width'], camera_data['height']) + + material = o3d.visualization.rendering.MaterialRecord() + render.scene.add_geometry("mesh", self.mesh, material) + + # 生成深度图 + depth_image = self._gen_depth_image(camera_data, render) + + # 获取相机参数 + fx = camera_data['fx'] + fy = camera_data['fy'] + cx = camera_data['cx'] + cy = camera_data['cy'] + height = camera_data['height'] + width = camera_data['width'] + + # 计算顶点在相机空间中的坐标 + w2c = get_w2c(camera_data['qvec'], camera_data['tvec']) + vertices = np.asarray(self.mesh.vertices) + vertices_homo = np.hstack([vertices, np.ones((len(vertices), 1))]) + vertices_cam = (w2c @ vertices_homo.T).T[:, :3] + + # 过滤掉相机后面的顶点 + valid_mask = vertices_cam[:, 2] > 0 + vertices_valid = vertices_cam[valid_mask] + + # 投影顶点到图像平面 + u = (vertices_valid[:, 0] * fx / vertices_valid[:, 2] + cx) + v = (vertices_valid[:, 1] * fy / 
vertices_valid[:, 2] + cy) + u_idx = np.clip(np.floor(u).astype(int), 0, width-1) + v_idx = np.clip(np.floor(v).astype(int), 0, height-1) + + # 初始化 min_depth_map 和 max_depth_map + min_depth_map = np.full((height, width), np.inf) + max_depth_map = np.zeros((height, width)) + + # 更新 min_depth_map 和 max_depth_map + for i in range(len(vertices_valid)): + x = u_idx[i] + y = v_idx[i] + d = vertices_valid[i, 2] + if d < min_depth_map[y, x]: + min_depth_map[y, x] = d + if d > max_depth_map[y, x]: + max_depth_map[y, x] = d + + # 对于每个顶点,检查深度范围 + edge_vertices = np.zeros(len(vertices), dtype=bool) + threshold = 3 # 阈值,可根据需要调整 + for i in range(len(vertices_valid)): + x = u_idx[i] + y = v_idx[i] + if min_depth_map[y, x] < np.inf: # 确保有数据 + depth_range = max_depth_map[y, x] - min_depth_map[y, x] + if depth_range > threshold: + # 找到原始顶点索引 + orig_idx = np.where(valid_mask)[0][i] + edge_vertices[orig_idx] = False + + # 标记边缘顶点 + vertex_colors = np.asarray(self.mesh.vertex_colors) + for i in range(len(vertices)): + if edge_vertices[i]: + vertex_colors[i] = [1.0, 0.0, 0.0] # 红色表示边缘 + + # 保存模型 + output_path = f"{self.asset_dir}/mesh_{self.id}_edge.ply" + o3d.io.write_triangle_mesh(output_path, self.mesh) + print(f"Edge detection completed. 
Results saved to {output_path}") + + # 计算面片的边缘性 + triangles = np.asarray(self.mesh.triangles) + face_edge = np.zeros(len(triangles), dtype=bool) + for face_idx, face in enumerate(triangles): + if any(edge_vertices[face]): + face_edge[face_idx] = True + + # 为了兼容原有代码,返回面片可见性和边缘性 + # 注意:这里face_visible_bitmap未定义,但原有代码可能期望返回两个值 + # 如果需要面片可见性,可以保留原有逻辑,但这里简化处理 + face_visible_bitmap = np.ones(len(triangles), dtype=bool) # 临时填充 + return face_visible_bitmap, face_edge """ def _mask_face_occlusion(self): @@ -523,7 +976,11 @@ class ModelProcessor: return self._flag_model(camera_data, None) """ + countour_faces_dict = {} visible_faces_dict = {} + edge_faces_dict = {} + delete_edge_faces_dict = {} + n = 0 for img in images.values(): camera = cameras[img.camera_id] camera_data = { @@ -538,14 +995,20 @@ class ModelProcessor: "name": img.name[:-4] } img_name = img.name[:-4] - print("img_name=", img_name) + print("img_name=", img_name, n) # if (img_name!="72_2" and img_name!="82_2" and img_name!="83_2"): # 82_2 72_2 - #if (img_name!="82_2"): + # if (img_name!="74_8"): # continue - face_visibility = self._flag_model(camera_data, None) + # face_visibility2, face_contour = self._flag_contour(camera_data, None) + # countour_faces_dict[img.name[:-4]] = np.where(face_contour)[0].tolist() + face_visibility, face_edge, face_delete_edge = self._flag_model(camera_data, None) visible_faces_dict[img.name[:-4]] = np.where(face_visibility)[0].tolist() + edge_faces_dict[img.name[:-4]] = np.where(face_edge)[0].tolist() + delete_edge_faces_dict[img.name[:-4]] = np.where(face_delete_edge)[0].tolist() + n += 1 - return visible_faces_dict + return {"result1": visible_faces_dict, "result2": edge_faces_dict, "result3": delete_edge_faces_dict} + # return {"result1": visible_faces_dict, "result2": countour_faces_dict} def process(self):