/* * SceneTexture.cpp * * Copyright (c) 2014-2015 SEACAVE * * Author(s): * * cDc * * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . * * * Additional Terms: * * You are required to preserve legal notices and author attributions in * that material or in the Appropriate Legal Notices displayed by works * containing it. */ #include "Common.h" #include "Scene.h" #include "RectsBinPack.h" // connected components #include #include #include // #include // #include #include "ConfigEnv.h" #include "cuda/MeshTextureCUDA.h" #include #include namespace fs = std::filesystem; // namespace py = pybind11; using namespace MVS; // D E F I N E S /////////////////////////////////////////////////// // uncomment to enable multi-threading based on OpenMP #ifdef _USE_OPENMP #define TEXOPT_USE_OPENMP #endif // uncomment to use SparseLU for solving the linear systems // (should be faster, but not working on old Eigen) #if !defined(EIGEN_DEFAULT_TO_ROW_MAJOR) || EIGEN_WORLD_VERSION>3 || (EIGEN_WORLD_VERSION==3 && EIGEN_MAJOR_VERSION>2) #define TEXOPT_SOLVER_SPARSELU #endif // method used to try to detect outlier face views // (should enable more consistent textures, but it is not working) #define TEXOPT_FACEOUTLIER_NA 0 #define TEXOPT_FACEOUTLIER_MEDIAN 1 #define TEXOPT_FACEOUTLIER_GAUSS_DAMPING 2 #define TEXOPT_FACEOUTLIER_GAUSS_CLAMPING 3 #define TEXOPT_FACEOUTLIER TEXOPT_FACEOUTLIER_GAUSS_CLAMPING // method used to find 
// optimal view per face
#define TEXOPT_INFERENCE_LBP 1
#define TEXOPT_INFERENCE_TRWS 2
#define TEXOPT_INFERENCE TEXOPT_INFERENCE_LBP
#define MASK_FACE_OCCLUSION
// #define USE_CUDA

// inference algorithm
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP
#include "../Math/LBP.h"
namespace MVS {
typedef LBPInference::NodeID NodeID;
// Potts model as smoothness function:
// zero cost only when both nodes carry the same non-zero label
// (label 0 is the "undefined/no view" label), otherwise maximum penalty
LBPInference::EnergyType STCALL SmoothnessPotts(LBPInference::NodeID, LBPInference::NodeID, LBPInference::LabelID l1, LBPInference::LabelID l2) {
	return l1 == l2 && l1 != 0 && l2 != 0 ? LBPInference::EnergyType(0) : LBPInference::EnergyType(LBPInference::MaxEnergy);
}
// linear smoothness: penalty grows with the absolute label distance
// NOTE(review): the expression is evaluated in float and implicitly
// converted to EnergyType on return
LBPInference::EnergyType STCALL SmoothnessLinear(LBPInference::NodeID, LBPInference::NodeID, LBPInference::LabelID l1, LBPInference::LabelID l2) {
	return std::abs((int)l1 - (int)l2) * 0.5f * LBPInference::MaxEnergy;
}
// relaxed Potts variant: keep the hard constraint against the undefined
// label (0), but lower the penalty between two different defined views
LBPInference::EnergyType STCALL NewSmoothness(LBPInference::NodeID, LBPInference::NodeID, LBPInference::LabelID l1, LBPInference::LabelID l2) {
	if(l1 == l2) return 0;
	if(l1 == 0 || l2 == 0) return LBPInference::MaxEnergy; // keep the hard constraint towards the undefined label
	return LBPInference::EnergyType(0.3f * LBPInference::MaxEnergy); // reduced penalty between different views
}
}
#endif
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_TRWS
#include "../Math/TRWS/MRFEnergy.h"
namespace MVS {
// TRWS MRF energy using Potts model
typedef unsigned NodeID;
typedef unsigned LabelID;
typedef TypePotts::REAL EnergyType;
static const EnergyType MaxEnergy(1);
// thin wrapper around the TRW-S MRF solver; ownership of the solver and
// the node-handle array is managed by the CAutoPtr members
struct TRWSInference {
	typedef MRFEnergy MRFEnergyType;
	typedef MRFEnergy::Options MRFOptions;
	CAutoPtr mrf;      // owned solver instance
	CAutoPtrArr nodes; // per-node handles returned by AddNode
	inline TRWSInference() {}
	// allocate the solver and the node-handle array
	void Init(NodeID nNodes, LabelID nLabels) {
		mrf = new MRFEnergyType(TypePotts::GlobalSize(nLabels));
		nodes = new MRFEnergyType::NodeId[nNodes];
	}
	inline bool IsEmpty() const { return mrf == NULL; }
	// register node n with its unary data costs D (one entry per label)
	inline void AddNode(NodeID n, const EnergyType* D) {
		nodes[n] = mrf->AddNode(TypePotts::LocalSize(), TypePotts::NodeData(D));
	}
	// connect two nodes with a Potts pairwise term of weight MaxEnergy
	inline void AddEdge(NodeID n1, NodeID n2) {
		mrf->AddEdge(nodes[n1],
nodes[n2], TypePotts::EdgeData(MaxEnergy));
	}
	// run the solver; returns the final energy of the labeling
	EnergyType Optimize() {
		MRFOptions options;
		options.m_eps = 0.005;    // convergence threshold
		options.m_iterMax = 1000; // iteration cap
		#if 1
		EnergyType lowerBound, energy;
		mrf->Minimize_TRW_S(options, lowerBound, energy);
		#else
		EnergyType energy;
		mrf->Minimize_BP(options, energy);
		#endif
		return energy;
	}
	inline LabelID GetLabel(NodeID n) const { return mrf->GetSolution(nodes[n]); }
};
}
#endif

// S T R U C T S ///////////////////////////////////////////////////

typedef Mesh::Vertex Vertex;
typedef Mesh::VIndex VIndex;
typedef Mesh::Face Face;
typedef Mesh::FIndex FIndex;
typedef Mesh::TexCoord TexCoord;
typedef Mesh::TexIndex TexIndex;

typedef int MatIdx;
typedef Eigen::Triplet MatEntry;
typedef Eigen::SparseMatrix SparseMat;

enum Mask { empty = 0, border = 128, interior = 255 };

// view-selection record for one candidate view of a face
struct ViewSelectionData {
	int viewID;           // view (image) index
	float quality;        // view quality score
	float consistency;    // view consistency with the neighboring faces
	cv::Rect patchBounds; // texture-patch bounds
	ViewSelectionData() : viewID(-1), quality(0.0f), consistency(0.0f) {}
	ViewSelectionData(int id, float q, float c) : viewID(id), quality(q), consistency(c) {}
};

// quality statistics of one texture patch
struct PatchQualityInfo {
	float averageQuality;
	float minQuality;
	float maxQuality;
	std::vector faceQualities;
};
// NOTE(review): file-scope mutable global; consider making it a member of
// MeshTexture so concurrent texturing runs do not share state
std::vector patchQualityInfos;

struct MeshTexture {
	// used to render the surface to a view camera
	typedef TImage FaceMap;
	struct RasterMesh : TRasterMesh {
		typedef TRasterMesh Base;
		FaceMap& faceMap;
		FIndex idxFace;   // face currently being rasterized
		Image8U mask;     // per-pixel validity mask of the view
		bool validFace;   // cleared when the current face fails a check
		// Mesh _mesh;
		Mesh& _mesh;
		MeshTexture& meshTexture;
		DepthMap& depthMap;
		bool bProcessConsist = false; // enable the depth-consistency pass
		IIndex _idxView;
		// RasterMesh(MeshTexture& _meshTexture, const Mesh::VertexArr& _vertices, const Camera& _camera, DepthMap& _depthMap, FaceMap& _faceMap, Mesh mesh, bool bProcessConsist)
		// 	: Base(_vertices, _camera, _depthMap), meshTexture(_meshTexture), faceMap(_faceMap), _mesh(mesh), depthMap(_depthMap), bProcessConsist(bProcessConsist){}
		RasterMesh(MeshTexture& _meshTexture, const Mesh::VertexArr& _vertices,
const Camera& _camera, DepthMap& _depthMap, FaceMap& _faceMap, const std::reference_wrapper meshWrapper, bool bProcessConsist)
			: Base(_vertices, _camera, _depthMap), meshTexture(_meshTexture), faceMap(_faceMap), _mesh(meshWrapper.get()), depthMap(_depthMap), bProcessConsist(bProcessConsist){}
		// reset the depth-map and mark the whole face-map as "no face"
		void Clear() {
			Base::Clear();
			faceMap.memset((uint8_t)NO_ID);
		}
		// per-pixel rasterization callback: z-buffer test + face-id write
		void Raster(const ImageRef& pt, const Triangle& t, const Point3f& bary) {
			const Point3f pbary(PerspectiveCorrectBarycentricCoordinates(t, bary));
			const Depth z(ComputeDepth(t, pbary));
			ASSERT(z > Depth(0));
			Depth& depth = depthMap(pt);
			// meshTexture.PerformLocalDepthConsistencyCheck(depthMap, faceMap, _mesh);
			// NOTE(review): dead debug loop — its body is fully commented out,
			// yet it still scans the entire depth-map for EVERY rasterized
			// pixel (O(W*H) per pixel); remove once debugging is done
			for (int r = 0; r < depthMap.rows; ++r) {
				for (int c = 0; c < depthMap.cols; ++c) {
					// printf("depthMap(r, c)=%f\n", depthMap(r, c));
				}
			}
			// NOTE(review): this mutex is acquired for every rasterized pixel;
			// consider checking the face validity once per triangle instead
			std::lock_guard lock(*_mesh.invalidFaces.mtx);
			if (_mesh.invalidFaces.data.find(idxFace) != _mesh.invalidFaces.data.end()) {
				// validFace = false; // mark the face as invalid
				// return; // skip rendering
			}
			// NOTE(review): '&& false' permanently disables this consistency
			// branch — confirm whether it should be re-enabled or deleted
			if (bProcessConsist && false) {
				float depthThreshold = 900.0f; // 0.1
				// printf("depthMap(pt)=%f, z=%f\n", depthMap(pt), z);
				// if (std::abs(depthMap(pt))!=0.0)
				// printf("depthMap(pt)=%f, z=%f\n", depthMap(pt), z);
				// if (depthMap(pt)>600)
				// printf("depthMap(pt)=%f, z=%f\n", depthMap(pt), z);
				// if (depthMap(pt) != 0 && std::abs(depthMap(pt) - z) > depthThreshold) {
				// if (depthMap(pt) != 0) {
				// if (depthMap(pt) != 0 && std::abs(depthMap(pt) - z) < depthThreshold) {
				// if (depthMap(pt) == 0) {
				if (depth != 0 && std::abs(depth - z) > depthThreshold) {
					validFace = false; // mark the face as invalid
					// also invalidate the neighboring faces (expansion)
					int expansionRadius = 30; // roughly a 3-face neighborhood
					std::queue faceQueue;
					std::unordered_set processedFaces;
					faceQueue.push(idxFace);
					processedFaces.insert(idxFace);
					// breadth-first expansion, capped by expansionRadius visits
					while (!faceQueue.empty() && expansionRadius-- > 0) {
						FIndex currentFace = faceQueue.front();
						faceQueue.pop();
						// fetch the faces adjacent to the current face
						const Mesh::FaceFaces& adjFaces = _mesh.faceFaces[currentFace];
						for (int i = 0; i < 3; ++i) {
							FIndex neighborFace =
adjFaces[i]; if (neighborFace == NO_ID || processedFaces.find(neighborFace) != processedFaces.end()) continue; // 标记相邻面片无效 processedFaces.insert(neighborFace); faceQueue.push(neighborFace); // 在全局数组中标记为无效 #ifdef TEXOPT_USE_OPENMP #pragma omp critical #endif { std::lock_guard lock(*_mesh.invalidFaces.mtx); _mesh.invalidFaces.data.insert(neighborFace); // _mesh.invalidFacesAll[_idxView].data.insert(neighborFace); } } } return; } } if (depth == 0 || depth > z) { depth = z; // printf("depth=%f\n",depth); faceMap(pt) = validFace && (validFace = (mask(pt) != 0)) ? idxFace : NO_ID; } } }; // used to represent a pixel color typedef Point3f Color; typedef CLISTDEF0(Color) Colors; // cList viewDepthMaps; // 存储每个视图的深度图 // used to store info about a face (view, quality) struct FaceData { IIndex idxView;// the view seeing this face float quality; // how well the face is seen by this view #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA Color color; // additionally store mean color (used to remove outliers) #endif bool bInvalidFacesRelative = false; }; typedef cList FaceDataArr; // store information about one face seen from several views typedef cList FaceDataViewArr; // store data for all the faces of the mesh typedef cList VirtualFaceIdxsArr; // store face indices for each virtual face // used to assign a view to a face typedef uint32_t Label; typedef cList LabelArr; // represents a texture patch struct TexturePatch { Label label; // view index Mesh::FaceIdxArr faces; // indices of the faces contained by the patch RectsBinPack::Rect rect; // the bounding box in the view containing the patch }; typedef cList TexturePatchArr; // used to optimize texture patches struct SeamVertex { struct Patch { struct Edge { uint32_t idxSeamVertex; // the other vertex of this edge FIndex idxFace; // the face containing this edge in this patch inline Edge() {} inline Edge(uint32_t _idxSeamVertex) : idxSeamVertex(_idxSeamVertex) {} inline bool operator == (uint32_t _idxSeamVertex) const { return 
(idxSeamVertex == _idxSeamVertex); } }; typedef cList Edges; uint32_t idxPatch; // the patch containing this vertex Point2f proj; // the projection of this vertex in this patch Edges edges; // the edges starting from this vertex, contained in this patch (exactly two for manifold meshes) inline Patch() {} inline Patch(uint32_t _idxPatch) : idxPatch(_idxPatch) {} inline bool operator == (uint32_t _idxPatch) const { return (idxPatch == _idxPatch); } }; typedef cList Patches; VIndex idxVertex; // the index of this vertex Patches patches; // the patches meeting at this vertex (two or more) inline SeamVertex() {} inline SeamVertex(uint32_t _idxVertex) : idxVertex(_idxVertex) {} inline bool operator == (uint32_t _idxVertex) const { return (idxVertex == _idxVertex); } Patch& GetPatch(uint32_t idxPatch) { const uint32_t idx(patches.Find(idxPatch)); if (idx == NO_ID) return patches.emplace_back(idxPatch); return patches[idx]; } inline void SortByPatchIndex(IndexArr& indices) const { indices.resize(patches.size()); std::iota(indices.Begin(), indices.End(), 0); std::sort(indices.Begin(), indices.End(), [&](IndexArr::Type i0, IndexArr::Type i1) -> bool { return patches[i0].idxPatch < patches[i1].idxPatch; }); } }; typedef cList SeamVertices; // used to iterate vertex labels struct PatchIndex { bool bIndex; union { uint32_t idxPatch; uint32_t idxSeamVertex; }; }; typedef CLISTDEF0(PatchIndex) PatchIndices; struct VertexPatchIterator { uint32_t idx; uint32_t idxPatch; const SeamVertex::Patches* pPatches; inline VertexPatchIterator(const PatchIndex& patchIndex, const SeamVertices& seamVertices) : idx(NO_ID) { if (patchIndex.bIndex) { pPatches = &seamVertices[patchIndex.idxSeamVertex].patches; } else { idxPatch = patchIndex.idxPatch; pPatches = NULL; } } inline operator uint32_t () const { return idxPatch; } inline bool Next() { if (pPatches == NULL) return (idx++ == NO_ID); if (++idx >= pPatches->size()) return false; idxPatch = (*pPatches)[idx].idxPatch; return true; } }; mutable 
FloatArr meshCurvatures; // 存储每个面的曲率值 void ComputeFaceCurvatures() const; const Image8U3* alternativeTexture; // 备用纹理指针 // used to sample seam edges typedef TAccumulator AccumColor; typedef Sampler::Linear Sampler; struct SampleImage { AccumColor accumColor; const Image8U3& image; const Sampler sampler; inline SampleImage(const Image8U3& _image) : image(_image), sampler() {} // sample the edge with linear weights void AddEdge(const TexCoord& p0, const TexCoord& p1) { const TexCoord p01(p1 - p0); const float length(norm(p01)); ASSERT(length > 0.f); const int nSamples(ROUND2INT(MAXF(length, 1.f) * 2.f)-1); AccumColor edgeAccumColor; for (int s=0; s(s) / nSamples); const TexCoord samplePos(p0 + p01 * len); const Color color(image.sample(sampler, samplePos)); edgeAccumColor.Add(RGB2YCBCR(color), 1.f-len); } accumColor.Add(edgeAccumColor.Normalized(), length); } // returns accumulated color Color GetColor() const { return accumColor.Normalized(); } }; // used to interpolate adjustments color over the whole texture patch typedef TImage ColorMap; /* struct ColorF { float r, g, b, a; ColorF() : r(0), g(0), b(0), a(0) {} ColorF(float _r, float _g, float _b, float _a=1.0f) : r(_r), g(_g), b(_b), a(_a) {} }; */ public: MeshTexture(Scene& _scene, unsigned _nResolutionLevel=0, unsigned _nMinResolution=640); ~MeshTexture(); void ListVertexFaces(bool bUseExistingUV = false); bool ListCameraFaces(FaceDataViewArr&, float fOutlierThreshold, int nIgnoreMaskLabel, const IIndexArr& views, bool bUseVirtualFaces); bool CheckInvalidFaces(FaceDataViewArr& facesDatas, float fOutlierThreshold, int nIgnoreMaskLabel, const IIndexArr& _views, bool bUseVirtualFaces); bool IsFaceVisibleAndValid(const FaceDataArr& faceDatas, const IIndexArr& selectedCams) const; std::unordered_set PerformLocalDepthConsistencyCheck(DepthMap& depthMap, FaceMap& faceMap, Mesh& mesh, IIndex idxView, std::string strViewName); #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA bool FaceOutlierDetection(FaceDataArr& 
faceDatas, float fOutlierThreshold) const; #endif void CreateVirtualFaces(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const; void CreateVirtualFaces2(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras, const Mesh::FaceIdxArr& faceIndices, float thMaxNormalDeviation=25.f) const; void CreateVirtualFaces3(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const; void CreateVirtualFaces4(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, Mesh::FaceIdxArr& mapFaceToVirtualFace, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f); void CreateVirtualFaces5(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const; bool CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, std::vector& isVirtualFace, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const; bool CreateVirtualFaces7(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, std::vector& isVirtualFace, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const; IIndexArr SelectBestViews(const FaceDataArr& faceDatas, FIndex fid, unsigned minCommonCameras, float ratioAngleToQuality) const; IIndexArr SelectBestView(const FaceDataArr& faceDatas, FIndex fid, unsigned minCommonCameras, float ratioAngleToQuality) const; bool FaceViewSelection(unsigned minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, int nIgnoreMaskLabel, const IIndexArr& views); bool FaceViewSelection2(unsigned 
minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, int nIgnoreMaskLabel, const IIndexArr& views); bool FaceViewSelection3(unsigned minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, int nIgnoreMaskLabel, const IIndexArr& views, bool bUseExistingUV); bool FaceViewSelection4( unsigned minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, int nIgnoreMaskLabel, const IIndexArr& views, const Mesh::FaceIdxArr* faceIndices = nullptr); void CreateAdaptiveVirtualFaces(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras); bool ShouldMergeVirtualFace(const MeshTexture::FaceDataViewArr& facesDatas, const Mesh::FaceIdxArr& currentVirtualFace, FIndex candidateFace, unsigned minCommonCameras); uint32_t FindNearestPatchForFaces(const std::vector& faceIndices); void FixIsolatedComponents(); void ReinitializeSeamData(); bool ValidateSeamDataConsistency(); bool ReassignComponentForFace(FIndex faceIdx); bool ReassignFaceToCorrectPatch(FIndex faceIdx); void CleanSeamEdgesComprehensive(); void CreateSeamVertices(); uint32_t FindOrCreateComponentForFace(FIndex faceIdx); uint32_t FindNearestPatchForComponent(uint32_t compID); void GlobalSeamLeveling(); void GlobalSeamLeveling3(); void LocalSeamLeveling(); void LocalSeamLeveling3(); void GlobalSeamLevelingExternalUV(); void LocalSeamLevelingExternalUV(); void GenerateTexture(bool bGlobalSeamLeveling, bool bLocalSeamLeveling, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, float fSharpnessWeight, int maxTextureSize, const SEACAVE::String& baseFileName, bool bOriginFaceview, Scene *pScene); void GenerateTextureForUV(bool bGlobalSeamLeveling, bool bLocalSeamLeveling, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, float fSharpnessWeight, int maxTextureSize, const SEACAVE::String& basename, bool bOriginFaceview, Scene *pScene, Mesh::TexCoordArr& 
existingTexcoords, Mesh::TexIndexArr& existingTexindices); void GenerateTexture2(bool bGlobalSeamLeveling, bool bLocalSeamLeveling, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, float fSharpnessWeight, int maxTextureSize, const SEACAVE::String& baseFileName); bool TextureWithExistingUV( const IIndexArr& views, int nIgnoreMaskLabel, float fOutlierThreshold, unsigned nTextureSizeMultiple, Pixel8U colEmpty, float fSharpnessWeight, const Mesh::Image8U3Arr& existingTextures, // 添加已有纹理参数 const Mesh::TexCoordArr& existingTexcoords, // 添加已有UV参数 const Mesh::TexIndexArr& existingTexindices // 添加已有纹理索引参数 ); bool GenerateTextureWithViewConsistency( bool bGlobalSeamLeveling, bool bLocalSeamLeveling, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, float fSharpnessWeight, int maxTextureSize, const String& basename, bool bOriginFaceview, Scene* pScene); bool ValidateSeamDataForLeveling(); void CheckMemoryIntegrity(); void RebuildComponentMapping(); void AssignOrphanFacesToComponents(const std::vector& faceToPatch); void FixComponentMappingsOnceAndForAll(); void CleanSeamEdges(); // 在头文件中,修改函数声明: bool PackTextureAtlases( Mesh::TexCoordArr& faceTexcoords2, Mesh::TexIndexArr& faceTexindices2, std::vector& generatedTextures, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, int maxTextureSize); void SelectOptimalViewsWithConsistency( std::vector& faceViewData, int minPatchSize); void AssignComponentsToOrphanFaces(); void CreateConsistentTexturePatches( const std::vector& faceViewData, std::vector& patchAssignments, int minPatchSize); void GenerateHighQualityTexture( std::vector& textures, const Mesh::TexCoordArr& faceTexcoords, const Mesh::TexIndexArr& faceTexindices, float fSharpnessWeight, Pixel8U colEmpty); cv::Vec3f SampleHighQuality(const cv::Mat& image, const Point2f& point); void FillMissingPixelsWithViewConsistency( Image8U3& texture, cv::Mat3f& colorAccum, cv::Mat1f& 
weightAccum, cv::Mat1i& viewAccum, Pixel8U colEmpty); float ComputeViewQuality(FIndex idxFace, int viewID); float ComputeFaceDistance(FIndex fid1, FIndex fid2); cv::Rect ComputeOptimalPatchBounds(const AABB2f& aabb, const cv::Size& imageSize, int border); void GlobalSeamLevelingEnhanced(); void LocalSeamLevelingEnhanced(); std::pair FindSharedEdgeIndices(const Face& face0, const Face& face1); void MergeOverlappingPatches(Mesh::TexCoordArr& faceTexcoords2); void PackTexturePatches(const Mesh::TexCoordArr& faceTexcoords2, const Mesh::TexIndexArr& faceTexindices2, std::vector& generatedTextures, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, int maxTextureSize); void ApplyAdaptiveSharpening(std::vector& textures, float fSharpnessWeight); void FillTextureHoles(std::vector& textures, Pixel8U colEmpty); bool IsFaceVisibleFromView(FIndex idxFace, int viewID); cv::Mat EnhanceTextureQuality(const cv::Mat& texture, const std::vector& faceQualities); void OptimizeTextureSeams(const std::vector& textures, const std::vector>& seamPoints); void CheckColorChannels(const Image8U3& texture, const std::string& name); Mesh::Image8U3Arr GenerateTextureAtlasWith3DBridge( const LabelArr& faceLabels, const IIndexArr& views, const Mesh::Image8U3Arr& sourceTextures, const Mesh::TexCoordArr& sourceTexcoords, const Mesh::TexIndexArr& sourceTexindices, unsigned nTextureSizeMultiple, Pixel8U colEmpty, float fSharpnessWeight); Mesh::Image8U3Arr GenerateTextureAtlasFromUV( const Mesh::Image8U3Arr& sourceTextures, // 已有纹理数组 const Mesh::TexCoordArr& sourceTexcoords, // 已有UV坐标 const Mesh::TexIndexArr& sourceTexindices, // 已有纹理索引 unsigned nTextureSizeMultiple, Pixel8U colEmpty, float fSharpnessWeight ); Point2f ProjectPointWithAutoCorrection(const Camera& camera, const Vertex& worldPoint, const Image& sourceImage); Point2f ProjectPointRobust(const Camera& camera, const Vertex& worldPoint, const Image& sourceImage, float searchRadius = 0.02f); bool ValidateProjection(const Vertex& 
worldPoint, const Image& sourceImage, Point2f imgPoint, float maxReprojectionError = 1.5f);
	Pixel8U SampleImageBilinear(const Image8U3& image, const Point2f& point);
	void ProjectFaceToTexture(FIndex faceID, IIndex viewID, const TexCoord* uv, Image8U3& texture);
	bool PointInTriangle(const Point2f& p, const Point2f& a, const Point2f& b, const Point2f& c, Point3f& bary);
	int ComputeOptimalTextureSize(float uvWidth, float uvHeight, unsigned multiple);
	// Bruce
	//*
	// RGB -> YCbCr using the full-range JPEG coefficients (Cb/Cr offset 128)
	template static inline PIXEL RGB2YCBCR(const PIXEL& v) {
		typedef typename PIXEL::Type T;
		return PIXEL(
			v[0] * T(0.299) + v[1] * T(0.587) + v[2] * T(0.114),
			v[0] * T(-0.168736) + v[1] * T(-0.331264) + v[2] * T(0.5) + T(128),
			v[0] * T(0.5) + v[1] * T(-0.418688) + v[2] * T(-0.081312) + T(128)
		);
	}
	// inverse transform of RGB2YCBCR above
	template static inline PIXEL YCBCR2RGB(const PIXEL& v) {
		typedef typename PIXEL::Type T;
		const T v1(v[1] - T(128));
		const T v2(v[2] - T(128));
		return PIXEL(
			v[0] + v2 * T(1.402),
			v[0] + v1 * T(-0.34414) + v2 * T(-0.71414),
			v[0] + v1 * T(1.772)
		);
	}
	// luminance of an RGB color = Y channel of its YCbCr representation
	static inline float GetLuminance(const Color& rgb) {
		Color ycbcr = MeshTexture::RGB2YCBCR(rgb);
		return ycbcr[0]; // the Y component is the luminance
	}
	// bundles the median color and the median quality of a set of views
	struct MedianValues {
		Color color;
		float quality;
	};
	// per-channel median color and median quality over (quality, color) samples
	// NOTE(review): assumes views is non-empty — r[mid] etc. are indexed
	// without a check (UB on empty input); also the per-channel median is
	// not a true vector median of the colors
	static MedianValues ComputeMedianColorAndQuality(const std::vector>& views) {
		std::vector colors;
		std::vector qualities; // collected quality values
		for (const auto& view : views) {
			qualities.push_back(view.first);  // collect quality
			colors.push_back(view.second);    // collect color
		}
		// sort each color channel and the qualities independently
		std::vector r, g, b;
		for (const auto& color : colors) {
			r.push_back(color[0]);
			g.push_back(color[1]);
			b.push_back(color[2]);
		}
		std::sort(r.begin(), r.end());
		std::sort(g.begin(), g.end());
		std::sort(b.begin(), b.end());
		std::sort(qualities.begin(), qualities.end()); // sort the qualities
		const int mid = colors.size() / 2;
		MedianValues result;
		result.color = Color(r[mid], g[mid], b[mid]); // median color
		result.quality = qualities[mid];              // median quality
		return result;
	}
	// median luminance over (quality, color) samples
	static float ComputeMedianLuminance(const std::vector>& views) {
		std::vector luminances;
		for (const auto& view : views) {
			luminances.push_back(MeshTexture::GetLuminance(view.second));
		}
		std::sort(luminances.begin(), luminances.end());
		return luminances[luminances.size() / 2];
	}
	// median absolute deviation (MAD) of the colors w.r.t. the given median
	static float ComputeColorMAD(const std::vector>& views, const Color& median) {
		std::vector distances;
		for (const auto& view : views) {
			distances.push_back(cv::norm(view.second - median));
		}
		std::sort(distances.begin(), distances.end());
		return distances[distances.size() / 2];
	}
	// median absolute deviation (MAD) of the luminances w.r.t. medianLuminance
	static float ComputeLuminanceMAD(const std::vector>& views, float medianLuminance) {
		std::vector distances;
		for (const auto& view : views) {
			distances.push_back(std::abs(MeshTexture::GetLuminance(view.second) - medianLuminance));
		}
		std::sort(distances.begin(), distances.end());
		return distances[distances.size() / 2];
	}
	//*/
	/*
	// ITU-R BT.601 studio-range coefficients with clamping for numeric stability
	template static inline PIXEL RGB2YCBCR(const PIXEL& v) {
		typedef typename PIXEL::Type T;
		const T y = 0.257f * v[0] + 0.504f * v[1] + 0.098f * v[2] + 16.0f;
		const T cb = -0.148f * v[0] - 0.291f * v[1] + 0.439f * v[2] + 128.0f;
		const T cr = 0.439f * v[0] - 0.368f * v[1] - 0.071f * v[2] + 128.0f;
		return PIXEL(
			std::clamp(y, T(16), T(235)),
			std::clamp(cb, T(16), T(240)),
			std::clamp(cr, T(16), T(240))
		);
	}
	template static inline PIXEL YCBCR2RGB(const PIXEL& v) {
		typedef typename PIXEL::Type T;
		const T y = std::max(v[0] - T(16), T(0));
		const T cb = v[1] - T(128);
		const T cr = v[2] - T(128);
		const T r = 1.164f * y + 1.596f * cr;
		const T g = 1.164f * y - 0.392f * cb - 0.813f * cr;
		const T b = 1.164f * y + 2.017f * cb;
		return PIXEL(
			std::clamp(r, T(0), T(255)),
			std::clamp(g, T(0), T(255)),
			std::clamp(b, T(0), T(255))
		);
	}
	*/
	/*
	// OpenCV-based standard conversion
	template static inline PIXEL RGB2YCBCR(const PIXEL& v) {
		cv::Mat src(1, 1, CV_32FC3, const_cast(v.ptr()));
		cv::Mat dst;
		cv::cvtColor(src, dst, cv::COLOR_RGB2YCrCb); // use the OpenCV standard conversion
		return
PIXEL(dst.at(0)); } template static inline PIXEL YCBCR2RGB(const PIXEL& v) { cv::Mat src(1, 1, CV_32FC3, const_cast(v.ptr())); cv::Mat dst; cv::cvtColor(src, dst, cv::COLOR_YCrCb2RGB); // 使用OpenCV标准转换 return PIXEL(dst.at(0)); } //*/ // Mesh::FaceIdxArr m_mapFaceToVirtualFace; protected: static void ProcessMask(Image8U& mask, int stripWidth); static void PoissonBlending(const Image32F3& src, Image32F3& dst, const Image8U& mask, float bias=1.f); public: const unsigned nResolutionLevel; // how many times to scale down the images before mesh optimization const unsigned nMinResolution; // how many times to scale down the images before mesh optimization // store found texture patches TexturePatchArr texturePatches; LabelArr labelsInvalid; // used to compute the seam leveling PairIdxArr seamEdges; // the (face-face) edges connecting different texture patches Mesh::FaceIdxArr components; // for each face, stores the texture patch index to which belongs IndexArr mapIdxPatch; // remap texture patch indices after invalid patches removal SeamVertices seamVertices; // array of vertices on the border between two or more patches // valid the entire time Mesh::VertexFacesArr& vertexFaces; // for each vertex, the list of faces containing it BoolArr& vertexBoundary; // for each vertex, stores if it is at the boundary or not Mesh::FaceFacesArr& faceFaces; // for each face, the list of adjacent faces, NO_ID for border edges (optional) Mesh::TexCoordArr& faceTexcoords; // for each face, the texture-coordinates of the vertices Mesh::TexIndexArr& faceTexindices; // for each face, the texture-coordinates of the vertices Mesh::Image8U3Arr& texturesDiffuse; // texture containing the diffuse color Mesh::Image8U3Arr texturesDiffuseTemp; // texture containing the diffuse color // constant the entire time Mesh::VertexArr& vertices; Mesh::FaceArr& faces; ImageArr& images; Scene& scene; // the mesh vertices and faces }; // creating an invalid mask for the given image corresponding to // the 
// invalid pixels generated during image correction for the lens distortion;
// the returned mask has the same size as the image and is set to zero for invalid pixels
static Image8U DetectInvalidImageRegions(const Image8U3& image)
{
	const cv::Scalar upDiff(3);
	const int flags(8 | (255 << 8)); // 8-connectivity; write 255 into the fill mask
	// cv::floodFill requires the mask to be 2 pixels larger than the image
	Image8U mask(image.rows + 2, image.cols + 2);
	mask.memset(0);
	Image8U imageGray;
	cv::cvtColor(image, imageGray, cv::COLOR_BGR2GRAY);
	// flood-fill from the 4 corners and the 4 edge mid-points: black seeds
	// there are assumed to belong to the invalid undistortion border
	if (imageGray(0, 0) == 0)
		cv::floodFill(imageGray, mask, cv::Point(0, 0), 255, NULL, cv::Scalar(0), upDiff, flags);
	if (imageGray(image.rows / 2, 0) == 0)
		cv::floodFill(imageGray, mask, cv::Point(0, image.rows / 2), 255, NULL, cv::Scalar(0), upDiff, flags);
	if (imageGray(image.rows - 1, 0) == 0)
		cv::floodFill(imageGray, mask, cv::Point(0, image.rows - 1), 255, NULL, cv::Scalar(0), upDiff, flags);
	if (imageGray(image.rows - 1, image.cols / 2) == 0)
		cv::floodFill(imageGray, mask, cv::Point(image.cols / 2, image.rows - 1), 255, NULL, cv::Scalar(0), upDiff, flags);
	if (imageGray(image.rows - 1, image.cols - 1) == 0)
		cv::floodFill(imageGray, mask, cv::Point(image.cols - 1, image.rows - 1), 255, NULL, cv::Scalar(0), upDiff, flags);
	if (imageGray(image.rows / 2, image.cols - 1) == 0)
		cv::floodFill(imageGray, mask, cv::Point(image.cols - 1, image.rows / 2), 255, NULL, cv::Scalar(0), upDiff, flags);
	if (imageGray(0, image.cols - 1) == 0)
		cv::floodFill(imageGray, mask, cv::Point(image.cols - 1, 0), 255, NULL, cv::Scalar(0), upDiff, flags);
	if (imageGray(0, image.cols / 2) == 0)
		cv::floodFill(imageGray, mask, cv::Point(image.cols / 2, 0), 255, NULL, cv::Scalar(0), upDiff, flags);
	// crop the extra border and invert: pixels not reached by any fill
	// (valid image content) become 255, flooded border pixels become 0
	mask = (mask(cv::Rect(1,1, imageGray.cols,imageGray.rows)) == 0);
	return mask;
}
// cache references into the scene mesh; nResolutionLevel / nMinResolution
// control how much the input images are scaled down before texturing
MeshTexture::MeshTexture(Scene& _scene, unsigned _nResolutionLevel, unsigned _nMinResolution)
	:
	nResolutionLevel(_nResolutionLevel),
	nMinResolution(_nMinResolution),
	vertexFaces(_scene.mesh.vertexFaces),
	vertexBoundary(_scene.mesh.vertexBoundary),
	faceFaces(_scene.mesh.faceFaces),
faceTexcoords(_scene.mesh.faceTexcoords),
	faceTexindices(_scene.mesh.faceTexindices),
	texturesDiffuse(_scene.mesh.texturesDiffuse),
	// NOTE(review): texturesDiffuseTemp is copy-initialized from the same
	// source as the texturesDiffuse reference — confirm a full copy is intended
	texturesDiffuseTemp(_scene.mesh.texturesDiffuse),
	vertices(_scene.mesh.vertices),
	faces(_scene.mesh.faces),
	images(_scene.images),
	scene(_scene),
	alternativeTexture(nullptr)
{
}
// release the adjacency data built for texturing (see ListVertexFaces)
MeshTexture::~MeshTexture()
{
	vertexFaces.Release();
	vertexBoundary.Release();
	faceFaces.Release();
}
// estimate a per-face curvature value (stored in the mutable meshCurvatures)
// from the angular deviation between each vertex normal and the normals of
// the faces incident to that vertex
void MeshTexture::ComputeFaceCurvatures() const
{
	if (scene.mesh.vertices.empty() || scene.mesh.faces.empty())
		return; // avoid touching empty data
	const Mesh& mesh = scene.mesh;
	meshCurvatures.resize(mesh.faces.size());
	// 1. compute per-vertex curvature
	std::vector vertexCurvatures(mesh.vertices.size(), 0.0f);
	FOREACH(idxVert, mesh.vertices) {
		// guard against missing adjacency/normal data for this vertex
		if (idxVert >= mesh.vertexFaces.size() || idxVert >= mesh.vertexNormals.size())
			continue;
		const Normal& normalCenter = mesh.vertexNormals[idxVert];
		float sumAngle = 0.0f;
		int count = 0;
		// walk the faces incident to this vertex
		const Mesh::FaceIdxArr& vf = mesh.vertexFaces[idxVert]; // incident faces of the vertex
		for (FIndex adjFace : vf) {
			const Normal& adjNormal = mesh.faceNormals[adjFace];
			sumAngle += ComputeAngleN(normalCenter.ptr(), adjNormal.ptr());
			++count;
		}
		// curvature ~ normal-angle variation
		// NOTE(review): despite the original comment mentioning variance,
		// this is the MEAN angle; vertices with fewer than 2 incident faces get 0
		vertexCurvatures[idxVert] = (count > 1) ? sumAngle / count : 0.0f;
	}
	// 2.
转换为面曲率 FOREACH(idxFace, mesh.faces) { const Mesh::Face& f = mesh.faces[idxFace]; float curvature = 0.0f; for (int i = 0; i < 3; ++i) { curvature += vertexCurvatures[f[i]]; } meshCurvatures[idxFace] = curvature / 3.0f; } } // extract array of triangles incident to each vertex // and check each vertex if it is at the boundary or not void MeshTexture::ListVertexFaces(bool bUseExistingUV) { // if (!bUseExistingUV) scene.mesh.EmptyExtra(); scene.mesh.ListIncidenteFaces(); scene.mesh.ListBoundaryVertices(); scene.mesh.ListIncidenteFaceFaces(); } // extract array of faces viewed by each image bool MeshTexture::ListCameraFaces(FaceDataViewArr& facesDatas, float fOutlierThreshold, int nIgnoreMaskLabel, const IIndexArr& _views, bool bUseVirtualFaces) { // create faces octree Mesh::Octree octree; Mesh::FacesInserter::CreateOctree(octree, scene.mesh); #ifdef TEXOPT_USE_OPENMP #pragma omp critical(invalid_faces_access) #endif { // scene.mesh.invalidFaces.clear(); } // extract array of faces viewed by each image IIndexArr views(_views); if (views.empty()) { views.resize(images.size()); std::iota(views.begin(), views.end(), IIndex(0)); } facesDatas.resize(faces.size()); // viewDepthMaps.resize(views.size()); // 初始化深度图存储 Util::Progress progress(_T("Initialized views"), views.size()); typedef float real; TImage imageGradMag; TImage::EMat mGrad[2]; FaceMap faceMap; DepthMap depthMap; #ifdef TEXOPT_USE_OPENMP bool bAbort(false); #pragma omp parallel for private(imageGradMag, mGrad, faceMap, depthMap) for (int_t idx=0; idx<(int_t)views.size(); ++idx) { #pragma omp flush (bAbort) if (bAbort) { ++progress; continue; } const IIndex idxView(views[(IIndex)idx]); #else for (IIndex idxView: views) { #endif Image& imageData = images[idxView]; if (!imageData.IsValid()) { ++progress; continue; } std::string strPath = imageData.name; size_t lastSlash = strPath.find_last_of("/\\"); if (lastSlash == std::string::npos) lastSlash = 0; // 若无分隔符,从头开始 else lastSlash++; // 跳过分隔符 // 查找扩展名分隔符 '.' 
的位置 size_t lastDot = strPath.find_last_of('.'); if (lastDot == std::string::npos) lastDot = strPath.size(); // 若无扩展名,截到末尾 // 截取文件名(不含路径和扩展名) std::string strName = strPath.substr(lastSlash, lastDot - lastSlash); /* // if (strName!="74_8" && strName!="13_8" && strName!="61_8" && // strName!="92_8" && strName!="101_8" && strName!="102_8" && // strName!="103_8" && strName!="112_8" && strName!="113_8" && // strName!="122_8" && strName!="123_8" && strName!="132_8") // if (strName!="74_8" && strName!="13_8" && strName!="61_8" && // strName!="92_8" && strName!="101_8" && strName!="102_8" && // strName!="103_8" && strName!="112_8" && strName!="113_8" && // strName!="122_2" && strName!="123_2" && strName!="121_2") // if (strName!="122_2") if (strName!="122_2" && strName!="123_2" && strName!="121_2") { continue; } //*/ // load image unsigned level(nResolutionLevel); const unsigned imageSize(imageData.RecomputeMaxResolution(level, nMinResolution)); if ((imageData.image.empty() || MAXF(imageData.width,imageData.height) != imageSize) && !imageData.ReloadImage(imageSize)) { #ifdef TEXOPT_USE_OPENMP bAbort = true; #pragma omp flush (bAbort) continue; #else return false; #endif } imageData.UpdateCamera(scene.platforms); // compute gradient magnitude imageData.image.toGray(imageGradMag, cv::COLOR_BGR2GRAY, true); cv::Mat grad[2]; mGrad[0].resize(imageGradMag.rows, imageGradMag.cols); grad[0] = cv::Mat(imageGradMag.rows, imageGradMag.cols, cv::DataType::type, (void*)mGrad[0].data()); mGrad[1].resize(imageGradMag.rows, imageGradMag.cols); grad[1] = cv::Mat(imageGradMag.rows, imageGradMag.cols, cv::DataType::type, (void*)mGrad[1].data()); #if 1 cv::Sobel(imageGradMag, grad[0], cv::DataType::type, 1, 0, 3, 1.0/8.0); cv::Sobel(imageGradMag, grad[1], cv::DataType::type, 0, 1, 3, 1.0/8.0); #elif 1 const TMatrix kernel(CreateDerivativeKernel3x5()); cv::filter2D(imageGradMag, grad[0], cv::DataType::type, kernel); cv::filter2D(imageGradMag, grad[1], cv::DataType::type, kernel.t()); #else 
const TMatrix kernel(CreateDerivativeKernel5x7()); cv::filter2D(imageGradMag, grad[0], cv::DataType::type, kernel); cv::filter2D(imageGradMag, grad[1], cv::DataType::type, kernel.t()); #endif (TImage::EMatMap)imageGradMag = (mGrad[0].cwiseAbs2()+mGrad[1].cwiseAbs2()).cwiseSqrt(); // apply some blur on the gradient to lower noise/glossiness effects onto face-quality score cv::GaussianBlur(imageGradMag, imageGradMag, cv::Size(15, 15), 0, 0, cv::BORDER_DEFAULT); // select faces inside view frustum Mesh::FaceIdxArr cameraFaces; Mesh::FacesInserter inserter(cameraFaces); const TFrustum frustum(Matrix3x4f(imageData.camera.P), (float)imageData.width, (float)imageData.height); octree.Traverse(frustum, inserter); // project all triangles in this view and keep the closest ones faceMap.create(imageData.GetSize()); depthMap.create(imageData.GetSize()); std::unordered_set tempFaces; if (false) { // viewDepthMaps[idxView] = depthMap; for (int r = 0; r < depthMap.rows; ++r) { for (int c = 0; c < depthMap.cols; ++c) { // printf("1depthMap(r, c)=%f\n", depthMap(r, c)); } } RasterMesh::Triangle triangle; RasterMesh rasterer1(*this, vertices, imageData.camera, depthMap, faceMap, scene.mesh, false); RasterMesh::TriangleRasterizer triangleRasterizer(triangle, rasterer1); if (nIgnoreMaskLabel >= 0) { // import mask BitMatrix bmask; // std::cout << "nIgnoreMaskLabel is open" << std::endl; DepthEstimator::ImportIgnoreMask(imageData, imageData.GetSize(), (uint16_t)OPTDENSE::nIgnoreMaskLabel, bmask, &rasterer1.mask); } else if (nIgnoreMaskLabel == -1) { // creating mask to discard invalid regions created during image radial undistortion rasterer1.mask = DetectInvalidImageRegions(imageData.image); #if TD_VERBOSE != TD_VERBOSE_OFF if (VERBOSITY_LEVEL > 2) cv::imwrite(String::FormatString("umask%04d.png", idxView), rasterer1.mask); #endif } rasterer1.Clear(); #ifdef TEXOPT_USE_OPENMP #pragma omp critical(invalid_faces_access) #endif { std::lock_guard lock(*scene.mesh.invalidFaces.mtx); 
scene.mesh.invalidFaces.data.clear(); } printf("imageData.name=%s\n", imageData.name.c_str()); for (FIndex idxFace : cameraFaces) { if (idxFace >= faces.size()) { DEBUG_EXTRA("Invalid face index %u (max %u) in view %u", idxFace, faces.size()-1, idxView); continue; } // // 添加临界区保护 // bool skipFace = false; // #ifdef TEXOPT_USE_OPENMP // #pragma omp critical(invalid_faces_access) // #endif // { // std::lock_guard lock(scene.mesh.invalidFacesMutex); // skipFace = (scene.mesh.invalidFaces.find(idxFace) != scene.mesh.invalidFaces.end()); // } // if (skipFace) // { // continue; // } rasterer1.validFace = true; const Face& facet = faces[idxFace]; rasterer1.idxFace = idxFace; if (scene.is_face_visible(strName.c_str(), idxFace)) { rasterer1.Project(facet, triangleRasterizer); if (!rasterer1.validFace) rasterer1.Project(facet, triangleRasterizer); } } // if (!bUseVirtualFaces) // if (bUseVirtualFaces) tempFaces = PerformLocalDepthConsistencyCheck(depthMap, faceMap, scene.mesh, idxView, strName); // RasterMesh rasterer2(*this, vertices, imageData.camera, depthMap, faceMap, scene.mesh, true); RasterMesh rasterer2(*this, vertices, imageData.camera, depthMap, faceMap, std::ref(scene.mesh), true); for (int r = 0; r < depthMap.rows; ++r) { for (int c = 0; c < depthMap.cols; ++c) { // if (depthMap(r, c)> 999.0f) // printf("2depthMap(r, c)=%f\n", depthMap(r, c)); } } RasterMesh::TriangleRasterizer triangleRasterizer2(triangle, rasterer2); if (nIgnoreMaskLabel >= 0) { // import mask BitMatrix bmask; // std::cout << "nIgnoreMaskLabel is open" << std::endl; DepthEstimator::ImportIgnoreMask(imageData, imageData.GetSize(), (uint16_t)OPTDENSE::nIgnoreMaskLabel, bmask, &rasterer2.mask); } else if (nIgnoreMaskLabel == -1) { // creating mask to discard invalid regions created during image radial undistortion rasterer2.mask = DetectInvalidImageRegions(imageData.image); #if TD_VERBOSE != TD_VERBOSE_OFF if (VERBOSITY_LEVEL > 2) cv::imwrite(String::FormatString("umask%04d.png", idxView), 
rasterer2.mask); #endif } for (int r = 0; r < depthMap.rows; ++r) { for (int c = 0; c < depthMap.cols; ++c) { // if (depthMap(r, c)> 999.0f) // printf("3depthMap(r, c)=%f, r=%d, c=%d, faceMap(r, c)=%d\n", depthMap(r, c), r, c, faceMap(r, c)); } } // rasterer2.Clear(); for (FIndex idxFace : cameraFaces) { // 添加面索引有效性检查 if (idxFace >= faces.size()) { DEBUG_EXTRA("Invalid face index %u (max %u) in view %u", idxFace, faces.size()-1, idxView); continue; } // 添加临界区保护 bool skipFace = false; #ifdef TEXOPT_USE_OPENMP #pragma omp critical(invalid_faces_access) #endif { std::lock_guard lock(*scene.mesh.invalidFaces.mtx); skipFace = (scene.mesh.invalidFaces.data.find(idxFace) != scene.mesh.invalidFaces.data.end()); } if (skipFace) { continue; } if (tempFaces.find(idxFace) != tempFaces.end()) { } else { continue; } rasterer2.validFace = true; const Face& facet = faces[idxFace]; rasterer2.idxFace = idxFace; // rasterer2.Project(facet, triangleRasterizer2); // if (!rasterer2.validFace) // rasterer2.Project(facet, triangleRasterizer2); } for (int r = 0; r < depthMap.rows; ++r) { for (int c = 0; c < depthMap.cols; ++c) { // if (depthMap(r, c)> 999.0f) // printf("4depthMap(r, c)=%f, r=%d, c=%d, faceMap(r, c)=%d\n", depthMap(r, c), r, c, faceMap(r, c)); } } } else { // RasterMesh rasterer(vertices, imageData.camera, depthMap, faceMap); RasterMesh rasterer(*this, vertices, imageData.camera, depthMap, faceMap, scene.mesh, false); RasterMesh::Triangle triangle; RasterMesh::TriangleRasterizer triangleRasterizer(triangle, rasterer); if (nIgnoreMaskLabel >= 0) { // import mask BitMatrix bmask; // std::cout << "nIgnoreMaskLabel is open" << std::endl; DepthEstimator::ImportIgnoreMask(imageData, imageData.GetSize(), (uint16_t)OPTDENSE::nIgnoreMaskLabel, bmask, &rasterer.mask); } else if (nIgnoreMaskLabel == -1) { // creating mask to discard invalid regions created during image radial undistortion rasterer.mask = DetectInvalidImageRegions(imageData.image); #if TD_VERBOSE != TD_VERBOSE_OFF if 
(VERBOSITY_LEVEL > 2) cv::imwrite(String::FormatString("umask%04d.png", idxView), rasterer.mask); #endif } rasterer.Clear(); for (FIndex idxFace : cameraFaces) { rasterer.validFace = true; const Face& facet = faces[idxFace]; rasterer.idxFace = idxFace; if (scene.is_face_visible(strName.c_str(), idxFace)) { rasterer.Project(facet, triangleRasterizer); if (!rasterer.validFace) rasterer.Project(facet, triangleRasterizer); } } // if (!bUseVirtualFaces) // if (bUseVirtualFaces) tempFaces = PerformLocalDepthConsistencyCheck(depthMap, faceMap, scene.mesh, idxView, strName); } // compute the projection area of visible faces #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA CLISTDEF0IDX(uint32_t,FIndex) areas(faces.size()); areas.Memset(0); #endif #ifdef TEXOPT_USE_OPENMP #pragma omp critical #endif { // faceQuality is influenced by : // + area: the higher the area the more gradient scores will be added to the face quality // + sharpness: sharper image or image resolution or how close is to the face will result in higher gradient on the same face // ON GLOSS IMAGES it happens to have a high volatile sharpness depending on how the light reflects under different angles // + angle: low angle increases the surface area for (int j=0; j 0)); if (idxFace == NO_ID) continue; FaceDataArr& faceDatas = facesDatas[idxFace]; const Pixel8U& pixel = imageData.image(j, i); // 假设是8位图像,RGB三个通道任一超过250即视为过曝 if (pixel.r > 250 || pixel.g > 250 || pixel.b > 250) { // continue; } // if (!(scene.mesh.invalidFacesRelative.data.contains(idxFace) && scene.is_face_visible_relative(idxFace))) // if (false) { if (depthMap(j,i)>999.0f) { /* // continue; // printf("idxFace=%d, depthMap(j,i=%f\n", idxFace, depthMap(j,i)); FaceData& faceData = faceDatas.emplace_back(); faceData.idxView = idxView; faceData.quality = imageGradMag(j,i); faceData.bInvalidFacesRelative = true; // printf("faceData.quality=%f\n", faceData.quality); #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA faceData.color = imageData.image(j,i); 
#endif continue; */ #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA uint32_t& area = areas[idxFace]; if (area++ == 0) { #else if (faceDatas.empty() || faceDatas.back().idxView != idxView) { #endif // create new face-data FaceData& faceData = faceDatas.emplace_back(); faceData.idxView = idxView; faceData.quality = imageGradMag(j,i); #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA faceData.color = imageData.image(j,i); #endif faceData.bInvalidFacesRelative = true; } else { // update face-data ASSERT(!faceDatas.empty()); FaceData& faceData = faceDatas.back(); ASSERT(faceData.idxView == idxView); faceData.quality = imageGradMag(j,i); #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA faceData.color = Color(imageData.image(j,i)); #endif faceData.bInvalidFacesRelative = true; } continue; } } // if (tempFaces.find(idxFace) == tempFaces.end()) // continue; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA uint32_t& area = areas[idxFace]; if (area++ == 0) { #else if (faceDatas.empty() || faceDatas.back().idxView != idxView) { #endif // create new face-data FaceData& faceData = faceDatas.emplace_back(); faceData.idxView = idxView; faceData.quality = imageGradMag(j,i); #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA faceData.color = imageData.image(j,i); #endif if (depthMap(j,i)>999.0f) faceData.bInvalidFacesRelative = true; } else { // update face-data ASSERT(!faceDatas.empty()); FaceData& faceData = faceDatas.back(); ASSERT(faceData.idxView == idxView); faceData.quality += imageGradMag(j,i); #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA faceData.color += Color(imageData.image(j,i)); #endif if (depthMap(j,i)>999.0f) faceData.bInvalidFacesRelative = true; } } } // adjust face quality with camera angle relative to face normal // tries to increase chances of a camera with perpendicular view on the surface (smoothened normals) to be selected FOREACH(idxFace, facesDatas) { FaceDataArr& faceDatas = facesDatas[idxFace]; if (faceDatas.empty() || faceDatas.back().idxView != idxView) 
continue; const Face& f = faces[idxFace]; const Vertex faceCenter((vertices[f[0]] + vertices[f[1]] + vertices[f[2]]) / 3.f); const Point3f camDir(Cast(imageData.camera.C) - faceCenter); const Normal& faceNormal = scene.mesh.faceNormals[idxFace]; const float cosFaceCam(MAXF(0.001f, ComputeAngle(camDir.ptr(), faceNormal.ptr()))); faceDatas.back().quality *= SQUARE(cosFaceCam); } #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA FOREACH(idxFace, areas) { const uint32_t& area = areas[idxFace]; if (area > 0) { Color& color = facesDatas[idxFace].back().color; color = RGB2YCBCR(Color(color * (1.f/(float)area))); } } #endif } ++progress; } #ifdef TEXOPT_USE_OPENMP if (bAbort) return false; #endif progress.close(); #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA if (fOutlierThreshold > 0) { // try to detect outlier views for each face // (views for which the face is occluded by a dynamic object in the scene, ex. pedestrians) for (FaceDataArr& faceDatas: facesDatas) FaceOutlierDetection(faceDatas, fOutlierThreshold); } #endif return true; } bool MeshTexture::CheckInvalidFaces(FaceDataViewArr& facesDatas, float fOutlierThreshold, int nIgnoreMaskLabel, const IIndexArr& _views, bool bUseVirtualFaces) { // create faces octree Mesh::Octree octree; Mesh::FacesInserter::CreateOctree(octree, scene.mesh); #ifdef TEXOPT_USE_OPENMP #pragma omp critical(invalid_faces_access) #endif { // scene.mesh.invalidFaces.clear(); } // extract array of faces viewed by each image IIndexArr views(_views); if (views.empty()) { views.resize(images.size()); std::iota(views.begin(), views.end(), IIndex(0)); } // facesDatas.resize(faces.size()); Util::Progress progress(_T("Initialized views"), views.size()); typedef float real; TImage imageGradMag; TImage::EMat mGrad[2]; FaceMap faceMap; DepthMap depthMap; #ifdef TEXOPT_USE_OPENMP bool bAbort(false); #pragma omp parallel for private(imageGradMag, mGrad, faceMap, depthMap) for (int_t idx=0; idx<(int_t)views.size(); ++idx) { #pragma omp flush (bAbort) if 
(bAbort) { ++progress; continue; } const IIndex idxView(views[(IIndex)idx]); #else for (IIndex idxView: views) { #endif Image& imageData = images[idxView]; if (!imageData.IsValid()) { ++progress; continue; } std::string strPath = imageData.name; size_t lastSlash = strPath.find_last_of("/\\"); if (lastSlash == std::string::npos) lastSlash = 0; // 若无分隔符,从头开始 else lastSlash++; // 跳过分隔符 // 查找扩展名分隔符 '.' 的位置 size_t lastDot = strPath.find_last_of('.'); if (lastDot == std::string::npos) lastDot = strPath.size(); // 若无扩展名,截到末尾 // 截取文件名(不含路径和扩展名) std::string strName = strPath.substr(lastSlash, lastDot - lastSlash); // load image unsigned level(nResolutionLevel); const unsigned imageSize(imageData.RecomputeMaxResolution(level, nMinResolution)); if ((imageData.image.empty() || MAXF(imageData.width,imageData.height) != imageSize) && !imageData.ReloadImage(imageSize)) { #ifdef TEXOPT_USE_OPENMP bAbort = true; #pragma omp flush (bAbort) continue; #else return false; #endif } imageData.UpdateCamera(scene.platforms); // compute gradient magnitude imageData.image.toGray(imageGradMag, cv::COLOR_BGR2GRAY, true); cv::Mat grad[2]; mGrad[0].resize(imageGradMag.rows, imageGradMag.cols); grad[0] = cv::Mat(imageGradMag.rows, imageGradMag.cols, cv::DataType::type, (void*)mGrad[0].data()); mGrad[1].resize(imageGradMag.rows, imageGradMag.cols); grad[1] = cv::Mat(imageGradMag.rows, imageGradMag.cols, cv::DataType::type, (void*)mGrad[1].data()); #if 1 cv::Sobel(imageGradMag, grad[0], cv::DataType::type, 1, 0, 3, 1.0/8.0); cv::Sobel(imageGradMag, grad[1], cv::DataType::type, 0, 1, 3, 1.0/8.0); #elif 1 const TMatrix kernel(CreateDerivativeKernel3x5()); cv::filter2D(imageGradMag, grad[0], cv::DataType::type, kernel); cv::filter2D(imageGradMag, grad[1], cv::DataType::type, kernel.t()); #else const TMatrix kernel(CreateDerivativeKernel5x7()); cv::filter2D(imageGradMag, grad[0], cv::DataType::type, kernel); cv::filter2D(imageGradMag, grad[1], cv::DataType::type, kernel.t()); #endif 
(TImage::EMatMap)imageGradMag = (mGrad[0].cwiseAbs2()+mGrad[1].cwiseAbs2()).cwiseSqrt(); // apply some blur on the gradient to lower noise/glossiness effects onto face-quality score cv::GaussianBlur(imageGradMag, imageGradMag, cv::Size(15, 15), 0, 0, cv::BORDER_DEFAULT); // select faces inside view frustum Mesh::FaceIdxArr cameraFaces; Mesh::FacesInserter inserter(cameraFaces); const TFrustum frustum(Matrix3x4f(imageData.camera.P), (float)imageData.width, (float)imageData.height); octree.Traverse(frustum, inserter); // project all triangles in this view and keep the closest ones faceMap.create(imageData.GetSize()); depthMap.create(imageData.GetSize()); std::unordered_set tempFaces; { // RasterMesh rasterer(vertices, imageData.camera, depthMap, faceMap); RasterMesh rasterer(*this, vertices, imageData.camera, depthMap, faceMap, scene.mesh, false); RasterMesh::Triangle triangle; RasterMesh::TriangleRasterizer triangleRasterizer(triangle, rasterer); if (nIgnoreMaskLabel >= 0) { // import mask BitMatrix bmask; // std::cout << "nIgnoreMaskLabel is open" << std::endl; DepthEstimator::ImportIgnoreMask(imageData, imageData.GetSize(), (uint16_t)OPTDENSE::nIgnoreMaskLabel, bmask, &rasterer.mask); } else if (nIgnoreMaskLabel == -1) { // creating mask to discard invalid regions created during image radial undistortion rasterer.mask = DetectInvalidImageRegions(imageData.image); #if TD_VERBOSE != TD_VERBOSE_OFF if (VERBOSITY_LEVEL > 2) cv::imwrite(String::FormatString("umask%04d.png", idxView), rasterer.mask); #endif } rasterer.Clear(); for (FIndex idxFace : cameraFaces) { rasterer.validFace = true; const Face& facet = faces[idxFace]; rasterer.idxFace = idxFace; if (scene.is_face_visible(strName.c_str(), idxFace)) { rasterer.Project(facet, triangleRasterizer); if (!rasterer.validFace) rasterer.Project(facet, triangleRasterizer); } } // if (!bUseVirtualFaces) // if (bUseVirtualFaces) tempFaces = PerformLocalDepthConsistencyCheck(depthMap, faceMap, scene.mesh, idxView, strName); } 
// compute the projection area of visible faces #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA CLISTDEF0IDX(uint32_t,FIndex) areas(faces.size()); areas.Memset(0); #endif #ifdef TEXOPT_USE_OPENMP #pragma omp critical #endif { // faceQuality is influenced by : // + area: the higher the area the more gradient scores will be added to the face quality // + sharpness: sharper image or image resolution or how close is to the face will result in higher gradient on the same face // ON GLOSS IMAGES it happens to have a high volatile sharpness depending on how the light reflects under different angles // + angle: low angle increases the surface area for (int j=0; j 0)); if (idxFace == NO_ID) continue; FaceDataArr& faceDatas = facesDatas[idxFace]; // // if (!(scene.mesh.invalidFacesRelative.data.contains(idxFace) && scene.is_face_visible_relative(idxFace))) // if (false) // { // if (depthMap(j,i)>999.0f) // { // // continue; // // printf("idxFace=%d, depthMap(j,i=%f\n", idxFace, depthMap(j,i)); // FaceData& faceData = faceDatas.emplace_back(); // faceData.idxView = idxView; // faceData.quality = imageGradMag(j,i); // faceData.bInvalidFacesRelative = true; // // printf("faceData.quality=%f\n", faceData.quality); // #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA // faceData.color = imageData.image(j,i); // #endif // continue; // } // } // // if (tempFaces.find(idxFace) == tempFaces.end()) // // continue; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA uint32_t& area = areas[idxFace]; if (area++ == 0) { #else if (faceDatas.empty() || faceDatas.back().idxView != idxView) { #endif // create new face-data FaceData& faceData = faceDatas.emplace_back(); faceData.idxView = idxView; faceData.quality = imageGradMag(j,i); #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA faceData.color = imageData.image(j,i); #endif if (depthMap(j,i)>999.0f) faceData.bInvalidFacesRelative = true; } else { // update face-data ASSERT(!faceDatas.empty()); FaceData& faceData = faceDatas.back(); 
ASSERT(faceData.idxView == idxView); faceData.quality += imageGradMag(j,i); #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA faceData.color += Color(imageData.image(j,i)); #endif if (depthMap(j,i)>999.0f) faceData.bInvalidFacesRelative = true; } } } // adjust face quality with camera angle relative to face normal // tries to increase chances of a camera with perpendicular view on the surface (smoothened normals) to be selected FOREACH(idxFace, facesDatas) { FaceDataArr& faceDatas = facesDatas[idxFace]; if (faceDatas.empty() || faceDatas.back().idxView != idxView) continue; const Face& f = faces[idxFace]; const Vertex faceCenter((vertices[f[0]] + vertices[f[1]] + vertices[f[2]]) / 3.f); const Point3f camDir(Cast(imageData.camera.C) - faceCenter); const Normal& faceNormal = scene.mesh.faceNormals[idxFace]; const float cosFaceCam(MAXF(0.001f, ComputeAngle(camDir.ptr(), faceNormal.ptr()))); faceDatas.back().quality *= SQUARE(cosFaceCam); } #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA FOREACH(idxFace, areas) { const uint32_t& area = areas[idxFace]; if (area > 0) { Color& color = facesDatas[idxFace].back().color; color = RGB2YCBCR(Color(color * (1.f/(float)area))); } } #endif } ++progress; } #ifdef TEXOPT_USE_OPENMP if (bAbort) return false; #endif progress.close(); #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA if (fOutlierThreshold > 0) { // try to detect outlier views for each face // (views for which the face is occluded by a dynamic object in the scene, ex. 
pedestrians) for (FaceDataArr& faceDatas: facesDatas) FaceOutlierDetection(faceDatas, fOutlierThreshold); } #endif return true; } bool MeshTexture::IsFaceVisibleAndValid(const FaceDataArr& faceDatas, const IIndexArr& selectedCams) const { for (IIndex camID : selectedCams) { bool valid = false; for (const FaceData& fd : faceDatas) { if (fd.idxView == camID && !fd.bInvalidFacesRelative) { valid = true; break; } } if (!valid) return false; // 存在无效或不可见 } return true; } //* // 在MeshTexture类中添加局部深度一致性检查方法 std::unordered_set MeshTexture::PerformLocalDepthConsistencyCheck(DepthMap& depthMap, FaceMap& faceMap, Mesh& mesh, IIndex idxView, std::string strViewName) { // mesh.invalidFaces.clear(); std::unordered_set tempFaces; // 设置深度一致性阈值(可以根据场景调整) const float depthThreshold = 0.005f; // 绝对深度阈值(米)0.1f 0.01f 0.005f 0.001f const float relativeThreshold = 0.1f; // 相对深度阈值(5%)0.05f const int kernelSize = 30; // 检测核大小 30 const int halfKernel = kernelSize / 2; // 创建深度一致性标记图 Image8U consistencyMask(depthMap.size()); consistencyMask.memset(0); for (int r = 0; r < depthMap.rows; ++r) { for (int c = 0; c < depthMap.cols; ++c) { const FIndex idxFace = faceMap(r, c); if (idxFace == NO_ID || idxFace >= mesh.faces.size()) { continue; } #ifdef TEXOPT_USE_OPENMP #pragma omp critical #endif { tempFaces.insert(idxFace); } } } // 创建深度图的副本用于检查(避免修改原始深度图) DepthMap depthMapCopy = depthMap.clone(); int n1 = 0; int n2 = 0; int n3 = 0; int n4 = 0; for (int r = 1; r < depthMapCopy.rows - 1; ++r) for (int c = 1; c < depthMapCopy.cols - 1; ++c) consistencyMask(r, c) = 0; // 第一遍:标记所有深度不一致的像素 for (int r = halfKernel; r < depthMapCopy.rows - halfKernel; ++r) { for (int c = halfKernel; c < depthMapCopy.cols - halfKernel; ++c) { const Depth centerDepth = depthMapCopy(r, c); // const Depth centerDepth = depthMap(r, c); if (centerDepth <= 0.0f) { ++n1; // consistencyMask(r, c) = 255; // 标记为不一致 continue; } else ++n2; // consistencyMask(r, c) = 255; // 标记为不一致 // continue; // if (centerDepth>0.0f) // printf("1Test 
Depth=%f\n", centerDepth); // 计算局部深度平均值 float sum = 0.0f; int count = 0; for (int dr = -halfKernel; dr <= halfKernel; ++dr) { for (int dc = -halfKernel; dc <= halfKernel; ++dc) { const Depth neighborDepth = depthMapCopy(r+dr, c+dc); if (neighborDepth > 0.0f) { sum += neighborDepth; count++; } } } // if (count < 4) // 要求至少有4个有效邻居 // continue; const float avgDepth = sum / count; const float absDiff = std::abs(centerDepth - avgDepth); const float relDiff = absDiff / avgDepth; // printf("2Test %f, %f, %f, %f, %f, %f\n", centerDepth, avgDepth, absDiff, depthThreshold, relDiff, relativeThreshold); // 检查是否超过阈值 // if (absDiff > depthThreshold || relDiff > relativeThreshold) if (absDiff > depthThreshold) { // printf("consistencyMask %d, %d\n", r, c); consistencyMask(r, c) = 255; // 标记为不一致 ++n3; } } } // 创建膨胀后的掩码副本 Image8U dilatedMask = consistencyMask.clone(); // 仅对原始不一致像素进行膨胀 for (int r = 1; r < depthMapCopy.rows - 1; ++r) { for (int c = 1; c < depthMapCopy.cols - 1; ++c) { if (consistencyMask(r, c) != 255) // 只处理原始不一致像素 continue; n4++; // 扩展标记区域(避免重复计数)9 2 for (int dr = -9; dr <= 9; ++dr) { for (int dc = -9; dc <= 9; ++dc) { const int nr = r + dr; const int nc = c + dc; // 确保在图像范围内 if (dilatedMask.isInside(ImageRef(nc, nr))) { dilatedMask(nr, nc) = 255; } } } } } consistencyMask = dilatedMask; // printf("n1=%d, n2=%d, n3=%d, n4=%d\n", n1, n2, n3, n4); for (int r = 0; r < depthMap.rows; ++r) { for (int c = 0; c < depthMap.cols; ++c) { const FIndex idxFace = faceMap(r, c); if (consistencyMask(r, c) == 255) { if (idxFace == NO_ID || idxFace >= mesh.faces.size()) { continue; } { #ifdef TEXOPT_USE_OPENMP #pragma omp critical #endif { std::lock_guard lock(*mesh.invalidFaces.mtx); mesh.invalidFaces.data.insert(idxFace); // 标记面片无效 // mesh.invalidFacesAll[idxView].data.insert(idxFace); // std::lock_guard lock2(*mesh.invalidFacesRelative.mtx); // mesh.invalidFacesRelative.data.insert(idxFace); // 标记面片无效 } } } else { // std::lock_guard lock(*mesh.invalidFacesRelative.mtx); // if 
(mesh.invalidFacesRelative.Contains(idxFace)) { // mesh.invalidFacesRelative.Remove(idxFace); } } } } // 第三遍:清除不一致区域的深度值(现在可以安全修改原始depthMap) for (int r = 0; r < depthMap.rows; ++r) { for (int c = 0; c < depthMap.cols; ++c) { if (consistencyMask(r, c) == 255) { const FIndex idxFace = faceMap(r, c); if (idxFace == NO_ID || idxFace >= mesh.faces.size()) { continue; } // if (!scene.is_face_visible(strViewName.c_str(), faceMap(r, c))) { // depthMap(r, c) = 0; depthMap(r, c) = 1000.0f; // printf("depthMap(r, c)=%f, r=%d, c=%d\n",depthMap(r, c), r, c); // faceMap(r, c) = NO_ID; // printf("depthMap(r, c)=%f\n", depthMap(r, c)); } } } } // 可选:保存一致性掩码用于调试 #if TD_VERBOSE != TD_VERBOSE_OFF if (VERBOSITY_LEVEL > 2) { static int counter = 0; // cv::imwrite(String::FormatString("depth_consistency_%04d.png", counter++), consistencyMask); } #endif return tempFaces; } //*/ /* void MeshTexture::PerformLocalDepthConsistencyCheck(DepthMap& depthMap, FaceMap& faceMap, Mesh& mesh) { // 参数设置 const float depthThreshold = 0.05f; // 绝对深度阈值 const float relativeThreshold = 0.05f; // 相对深度阈值 const int kernelSize = 30; // 检测核大小 const int halfKernel = kernelSize / 2; // 创建深度一致性标记图 Image8U consistencyMask(depthMap.size()); consistencyMask.memset(0); // 使用积分图加速局部平均值计算 DepthMap integralMap; cv::integral(depthMap, integralMap, CV_32F); // 第一遍:标记所有深度不一致的像素 for (int r = halfKernel; r < depthMap.rows - halfKernel; ++r) { for (int c = halfKernel; c < depthMap.cols - halfKernel; ++c) { const Depth centerDepth = depthMap(r, c); if (centerDepth <= 0.0f) continue; // 使用积分图计算局部平均值 const float sum = integralMap(r+halfKernel+1, c+halfKernel+1) - integralMap(r-halfKernel, c+halfKernel+1) - integralMap(r+halfKernel+1, c-halfKernel) + integralMap(r-halfKernel, c-halfKernel); const int count = kernelSize * kernelSize; const float avgDepth = sum / count; // 计算绝对差值和相对差值 const float absDiff = std::abs(centerDepth - avgDepth); const float relDiff = absDiff / avgDepth; // 结合绝对和相对阈值判断 if (absDiff > depthThreshold && 
relDiff > relativeThreshold) { // consistencyMask(r, c) = 255; } } } // 使用形态学膨胀扩展不一致区域 cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 5)); cv::dilate(consistencyMask, consistencyMask, kernel); // 标记不一致区域对应的面片为无效 std::unordered_set invalidFaces; for (int r = 0; r < depthMap.rows; ++r) { for (int c = 0; c < depthMap.cols; ++c) { if (consistencyMask(r, c) == 255) { const FIndex idxFace = faceMap(r, c); if (idxFace != NO_ID) { // invalidFaces.insert(idxFace); } // 将不一致像素标记为无效 depthMap(r, c) = 0; faceMap(r, c) = NO_ID; } } } // 更新网格的无效面片集合 for (FIndex idxFace : invalidFaces) { // mesh.invalidFaces.insert(idxFace); } } //*/ // order the camera view scores with highest score first and return the list of first cameras // ratioAngleToQuality represents the ratio in witch we combine normal angle to quality for a face to obtain the selection score // - a ratio of 1 means only angle is considered // - a ratio of 0.5 means angle and quality are equally important // - a ratio of 0 means only camera quality is considered when sorting IIndexArr MeshTexture::SelectBestViews(const FaceDataArr& faceDatas, FIndex fid, unsigned minCommonCameras, float ratioAngleToQuality) const { ASSERT(!faceDatas.empty()); #if 1 //* CLISTDEF0IDX(FaceData,IIndex) validFaceDatas; for (const FaceData& fd : faceDatas) { if (!fd.bInvalidFacesRelative) { // 跳过无效视图 validFaceDatas.emplace_back(fd); } } if (validFaceDatas.empty()) { // 若无有效视图,选择质量最高的视图(即使无效) float maxQuality = -1; IIndex bestView = NO_ID; for (const FaceData& fd : faceDatas) { if (fd.quality > maxQuality) { maxQuality = fd.quality; bestView = fd.idxView; } } return (bestView != NO_ID) ? 
IIndexArr{bestView} : IIndexArr(); } //*/ // compute scores based on the view quality and its angle to the face normal float maxQuality = 0; for (const FaceData& faceData: validFaceDatas) maxQuality = MAXF(maxQuality, faceData.quality); const Face& f = faces[fid]; const Vertex faceCenter((vertices[f[0]] + vertices[f[1]] + vertices[f[2]]) / 3.f); CLISTDEF0IDX(float,IIndex) scores(validFaceDatas.size()); FOREACH(idxFaceData, validFaceDatas) { const FaceData& faceData = validFaceDatas[idxFaceData]; const Image& imageData = images[faceData.idxView]; const Point3f camDir(Cast(imageData.camera.C) - faceCenter); const Normal& faceNormal = scene.mesh.faceNormals[fid]; const float cosFaceCam(ComputeAngle(camDir.ptr(), faceNormal.ptr())); scores[idxFaceData] = ratioAngleToQuality*cosFaceCam + (1.f-ratioAngleToQuality)*faceData.quality/maxQuality; } // and sort the scores from to highest to smallest to get the best overall cameras IIndexArr scorePodium(validFaceDatas.size()); std::iota(scorePodium.begin(), scorePodium.end(), 0); scorePodium.Sort([&scores](IIndex i, IIndex j) { return scores[i] > scores[j]; }); #else // sort qualityPodium in relation to faceDatas[index].quality decreasing IIndexArr qualityPodium(faceDatas.size()); std::iota(qualityPodium.begin(), qualityPodium.end(), 0); qualityPodium.Sort([&faceDatas](IIndex i, IIndex j) { return faceDatas[i].quality > faceDatas[j].quality; }); // sort anglePodium in relation to face angle to camera increasing const Face& f = faces[fid]; const Vertex faceCenter((vertices[f[0]] + vertices[f[1]] + vertices[f[2]]) / 3.f); CLISTDEF0IDX(float,IIndex) cameraAngles(0, faceDatas.size()); for (const FaceData& faceData: faceDatas) { const Image& imageData = images[faceData.idxView]; const Point3f camDir(Cast(imageData.camera.C) - faceCenter); const Normal& faceNormal = scene.mesh.faceNormals[fid]; const float cosFaceCam(ComputeAngle(camDir.ptr(), faceNormal.ptr())); cameraAngles.emplace_back(cosFaceCam); } IIndexArr 
anglePodium(faceDatas.size()); // completes 'IIndexArr anglePodium' begun on the previous line (disabled #else branch)
	std::iota(anglePodium.begin(), anglePodium.end(), 0);
	anglePodium.Sort([&cameraAngles](IIndex i, IIndex j) {
		return cameraAngles[i] > cameraAngles[j];
	});
	// combine podium scores to get overall podium
	// and sort the scores in smallest to highest to get the best overall camera for current virtual face
	CLISTDEF0IDX(float,IIndex) scores(faceDatas.size());
	scores.Memset(0);
	FOREACH(sIdx, faceDatas) {
		scores[anglePodium[sIdx]] += ratioAngleToQuality * (sIdx+1);
		scores[qualityPodium[sIdx]] += (1.f - ratioAngleToQuality) * (sIdx+1);
	}
	IIndexArr scorePodium(faceDatas.size());
	std::iota(scorePodium.begin(), scorePodium.end(), 0);
	scorePodium.Sort([&scores](IIndex i, IIndex j) {
		return scores[i] < scores[j];
	});
	#endif
	// return the top-ranked views, at most minCommonCameras of them
	IIndexArr cameras(MIN(minCommonCameras, validFaceDatas.size()));
	FOREACH(i, cameras)
		cameras[i] = validFaceDatas[scorePodium[i]].idxView;
	return cameras;
}

// Fallback selector: returns the single view with the highest quality score
// for this face (empty array if faceDatas is empty). Note: fid,
// minCommonCameras and ratioAngleToQuality are not used by this variant.
IIndexArr MeshTexture::SelectBestView(const FaceDataArr& faceDatas, FIndex fid, unsigned minCommonCameras, float ratioAngleToQuality) const
{
	float maxQuality = -1;
	IIndex bestView = NO_ID;
	for (const FaceData& fd : faceDatas) {
		if (fd.quality > maxQuality) {
			maxQuality = fd.quality;
			bestView = fd.idxView;
		}
	}
	return (bestView != NO_ID) ?
		IIndexArr{bestView} : IIndexArr();
}

// Check whether the face is seen by every camera in cameraList
// (each FaceData entry records one view observing the face).
static bool IsFaceVisible(const MeshTexture::FaceDataArr& faceDatas, const IIndexArr& cameraList)
{
	size_t camFoundCounter(0);
	for (const MeshTexture::FaceData& faceData : faceDatas) {
		const IIndex cfCam = faceData.idxView;
		for (IIndex camId : cameraList) {
			if (cfCam == camId) {
				if (++camFoundCounter == cameraList.size())
					return true; // all requested cameras matched
				break; // camera found for this entry, move to the next FaceData
			}
		}
	}
	return camFoundCounter == cameraList.size();
}

// build virtual faces with:
// - similar normal
// - high percentage of common images that see them
void MeshTexture::CreateVirtualFaces(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras, float thMaxNormalDeviation) const
{
	const float ratioAngleToQuality(0.67f);
	const float cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation)));
	// start with every face unassigned
	Mesh::FaceIdxArr remainingFaces(faces.size());
	std::iota(remainingFaces.begin(), remainingFaces.end(), 0);
	std::vector selectedFaces(faces.size(), false);
	cQueue currentVirtualFaceQueue;
	std::unordered_set queuedFaces;
	do {
		// pick a random remaining face as the virtual-face seed
		const FIndex startPos = RAND() % remainingFaces.size();
		const FIndex virtualFaceCenterFaceID = remainingFaces[startPos];
		ASSERT(currentVirtualFaceQueue.IsEmpty());
		const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID];
		const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID];
		// select the common cameras
		Mesh::FaceIdxArr virtualFace;
		FaceDataArr virtualFaceDatas;
		if (centerFaceDatas.empty()) {
			// face seen by no camera: it becomes a single-face virtual face
			virtualFace.emplace_back(virtualFaceCenterFaceID);
			selectedFaces[virtualFaceCenterFaceID] = true;
			const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID);
			ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX);
			remainingFaces.RemoveAtMove(posToErase);
		} else {
			const IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality);
			// region-grow from the seed over similar-normal, co-visible neighbors
			currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID);
			queuedFaces.clear();
			do {
				const
FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); currentVirtualFaceQueue.PopHead(); // check for condition to add in current virtual face // normal angle smaller than thMaxNormalDeviation degrees const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); if (cosFaceToCenter < cosMaxNormalDeviation) continue; // check if current face is seen by all cameras in selectedCams ASSERT(!selectedCams.empty()); if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams)) continue; // remove it from remaining faces and add it to the virtual face { const auto posToErase = remainingFaces.FindFirst(currentFaceId); ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); remainingFaces.RemoveAtMove(posToErase); selectedFaces[currentFaceId] = true; virtualFace.push_back(currentFaceId); } // add all new neighbors to the queue const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId]; for (int i = 0; i < 3; ++i) { const FIndex fIdx = ffaces[i]; if (fIdx == NO_ID) continue; if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) { currentVirtualFaceQueue.AddTail(fIdx); queuedFaces.emplace(fIdx); } } } while (!currentVirtualFaceQueue.IsEmpty()); // compute virtual face quality and create virtual face for (IIndex idxView: selectedCams) { FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); virtualFaceData.quality = 0; virtualFaceData.idxView = idxView; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color = Point3f::ZERO; #endif unsigned processedFaces(0); for (FIndex fid : virtualFace) { const FaceDataArr& faceDatas = facesDatas[fid]; for (FaceData& faceData: faceDatas) { if (faceData.idxView == idxView) { virtualFaceData.quality += faceData.quality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color += faceData.color; #endif ++processedFaces; break; } } } ASSERT(processedFaces > 0); virtualFaceData.quality /= processedFaces; #if 
TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color /= processedFaces; #endif } ASSERT(!virtualFaceDatas.empty()); } virtualFacesDatas.emplace_back(std::move(virtualFaceDatas)); virtualFaces.emplace_back(std::move(virtualFace)); } while (!remainingFaces.empty()); } void MeshTexture::CreateVirtualFaces2(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras, const Mesh::FaceIdxArr& faceIndices, float thMaxNormalDeviation) const { // 使用正确的日志宏 VERBOSE("CreateVirtualFaces2: starting with %zu faces", faceIndices.size()); const float ratioAngleToQuality(0.67f); const float cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation))); Mesh::FaceIdxArr remainingFaces(faceIndices); // 只处理传入的面片 // 确保面索引有效 for (FIndex idx : remainingFaces) { if (idx >= faces.size()) { VERBOSE("Invalid face index in input: %u (max: %zu)", idx, faces.size()); return; } } std::vector selectedFaces(faces.size(), false); cQueue currentVirtualFaceQueue; std::unordered_set queuedFaces; // 确保remainingFaces不为空 if (remainingFaces.empty()) { VERBOSE("CreateVirtualFaces2: no faces to process"); return; } size_t iteration = 0; const size_t MAX_ITERATIONS = 1000000; // 防止无限循环 do { iteration++; if (iteration > MAX_ITERATIONS) { VERBOSE("CreateVirtualFaces2: exceeded max iterations (%zu)", MAX_ITERATIONS); break; } // 检查剩余面片是否为空 if (remainingFaces.empty()) { VERBOSE("CreateVirtualFaces2: no more faces to process"); break; } const FIndex startPos = RAND() % remainingFaces.size(); const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; // 验证中心面片ID有效性 if (virtualFaceCenterFaceID >= faces.size()) { VERBOSE("Invalid center face ID: %u (max: %zu)", virtualFaceCenterFaceID, faces.size()); remainingFaces.RemoveAtMove(startPos); continue; } VERBOSE("Processing virtual face center: %u (iteration %zu)", virtualFaceCenterFaceID, iteration); ASSERT(currentVirtualFaceQueue.IsEmpty()); const Normal& normalCenter = 
scene.mesh.faceNormals[virtualFaceCenterFaceID]; // 验证面数据有效性 if (virtualFaceCenterFaceID >= facesDatas.size()) { VERBOSE("Face data index out of bounds: %u (max: %zu)", virtualFaceCenterFaceID, facesDatas.size()); remainingFaces.RemoveAtMove(startPos); continue; } const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID]; // 选择公共相机 Mesh::FaceIdxArr virtualFace; FaceDataArr virtualFaceDatas; if (centerFaceDatas.empty()) { VERBOSE("Center face %u has no view data", virtualFaceCenterFaceID); virtualFace.emplace_back(virtualFaceCenterFaceID); selectedFaces[virtualFaceCenterFaceID] = true; const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID); if (posToErase == Mesh::FaceIdxArr::NO_INDEX) { VERBOSE("Face %u not found in remaining faces", virtualFaceCenterFaceID); } else { remainingFaces.RemoveAtMove(posToErase); } } else { const IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); // 验证选择的相机有效性 for (IIndex camIdx : selectedCams) { if (camIdx >= images.size()) { VERBOSE("Invalid camera index: %u (max: %zu)", camIdx, images.size()); return; } } currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); queuedFaces.clear(); queuedFaces.insert(virtualFaceCenterFaceID); size_t queueIteration = 0; const size_t MAX_QUEUE_ITERATIONS = 100000; // 防止无限循环 do { queueIteration++; if (queueIteration > MAX_QUEUE_ITERATIONS) { VERBOSE("Queue processing exceeded max iterations (%zu)", MAX_QUEUE_ITERATIONS); break; } if (currentVirtualFaceQueue.IsEmpty()) { VERBOSE("Queue is empty"); break; } const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); currentVirtualFaceQueue.PopHead(); // 验证当前面片ID有效性 if (currentFaceId >= faces.size()) { VERBOSE("Invalid current face ID: %u (max: %zu)", currentFaceId, faces.size()); continue; } VERBOSE("Processing neighbor face: %u", currentFaceId); // 检查法线角度 const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; const float 
cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); if (cosFaceToCenter < cosMaxNormalDeviation) { VERBOSE("Face %u normal angle too large (cos: %f)", currentFaceId, cosFaceToCenter); continue; } // 检查当前面是否被selectedCams中的所有相机看到 if (selectedCams.empty()) { VERBOSE("Selected cameras list is empty for face %u", currentFaceId); continue; } // 验证面数据索引有效性 if (currentFaceId >= facesDatas.size()) { VERBOSE("Face data index out of bounds: %u (max: %zu)", currentFaceId, facesDatas.size()); continue; } if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams)) { VERBOSE("Face %u not visible by all selected cameras", currentFaceId); continue; } // 从剩余面中移除并加入虚拟面 const auto posToErase = remainingFaces.FindFirst(currentFaceId); if (posToErase == Mesh::FaceIdxArr::NO_INDEX) { VERBOSE("Face %u already processed", currentFaceId); } else { remainingFaces.RemoveAtMove(posToErase); selectedFaces[currentFaceId] = true; virtualFace.push_back(currentFaceId); VERBOSE("Added face %u to virtual face (total: %zu)", currentFaceId, virtualFace.size()); } // 添加所有新邻居到队列 if (currentFaceId >= faceFaces.size()) { VERBOSE("FaceFaces index out of bounds: %u (max: %zu)", currentFaceId, faceFaces.size()); continue; } const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId]; for (int i = 0; i < 3; ++i) { const FIndex fIdx = ffaces[i]; if (fIdx == NO_ID) continue; // 验证邻居面片ID有效性 if (fIdx >= faces.size()) { VERBOSE("Invalid neighbor face ID: %u (max: %zu)", fIdx, faces.size()); continue; } if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) { currentVirtualFaceQueue.AddTail(fIdx); queuedFaces.emplace(fIdx); VERBOSE("Queued neighbor face: %u", fIdx); } } } while (!currentVirtualFaceQueue.IsEmpty()); // 计算虚拟面质量和创建虚拟面 for (IIndex idxView : selectedCams) { // 验证视图索引有效性 if (idxView >= images.size()) { VERBOSE("Invalid view index: %u (max: %zu)", idxView, images.size()); continue; } FaceData virtualFaceData; virtualFaceData.quality = 0; virtualFaceData.idxView = idxView; 
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color = Point3f::ZERO; #endif unsigned processedFaces = 0; for (FIndex fid : virtualFace) { // 验证面片ID有效性 if (fid >= facesDatas.size()) { VERBOSE("Invalid face ID in virtual face: %u (max: %zu)", fid, facesDatas.size()); continue; } const FaceDataArr& faceDatas = facesDatas[fid]; bool found = false; for (const FaceData& faceData : faceDatas) { if (faceData.idxView == idxView) { virtualFaceData.quality += faceData.quality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color += faceData.color; #endif processedFaces++; found = true; break; } } if (!found) { VERBOSE("Face %u has no data for view %u", fid, idxView); } } if (processedFaces > 0) { virtualFaceData.quality /= processedFaces; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color /= processedFaces; #endif virtualFaceDatas.emplace_back(virtualFaceData); VERBOSE("Added view %u to virtual face with %u faces", idxView, processedFaces); } else { VERBOSE("No valid data for view %u in virtual face", idxView); } } if (virtualFaceDatas.empty()) { VERBOSE("Virtual face has no valid views"); } } if (!virtualFace.empty()) { virtualFacesDatas.emplace_back(std::move(virtualFaceDatas)); virtualFaces.emplace_back(std::move(virtualFace)); VERBOSE("Created virtual face with %zu faces and %zu views", virtualFaces.back().size(), virtualFacesDatas.back().size()); } else { VERBOSE("Skipping empty virtual face"); } VERBOSE("Remaining faces: %zu", remainingFaces.size()); } while (!remainingFaces.empty()); VERBOSE("CreateVirtualFaces2: created %zu virtual faces", virtualFaces.size()); } void MeshTexture::CreateVirtualFaces3(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras, float thMaxNormalDeviation) const { if (meshCurvatures.empty()) { ComputeFaceCurvatures(); } const float ratioAngleToQuality(0.67f); const float 
cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation))); Mesh::FaceIdxArr remainingFaces(faces.size()); std::iota(remainingFaces.begin(), remainingFaces.end(), 0); std::vector selectedFaces(faces.size(), false); cQueue currentVirtualFaceQueue; std::unordered_set queuedFaces; do { const FIndex startPos = RAND() % remainingFaces.size(); const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; // 动态法线阈值 const float centerCurvature = meshCurvatures[virtualFaceCenterFaceID]; const float dynamicThreshold = (centerCurvature < 0.2f) ? 15.0f : 8.0f; // 曲率<0.2为平坦区域 const float dynamicCosTh = COS(FD2R(dynamicThreshold)); ASSERT(currentVirtualFaceQueue.IsEmpty()); const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID]; // select the common cameras Mesh::FaceIdxArr virtualFace; FaceDataArr virtualFaceDatas; if (centerFaceDatas.empty()) { virtualFace.emplace_back(virtualFaceCenterFaceID); selectedFaces[virtualFaceCenterFaceID] = true; const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID); ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); remainingFaces.RemoveAtMove(posToErase); } else { const IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); queuedFaces.clear(); do { const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); currentVirtualFaceQueue.PopHead(); // check for condition to add in current virtual face // normal angle smaller than thMaxNormalDeviation degrees const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); // if (cosFaceToCenter < cosMaxNormalDeviation) // continue; if (cosFaceToCenter < dynamicCosTh) // 使用动态阈值 continue; // check if current face is seen by all cameras in selectedCams ASSERT(!selectedCams.empty()); if 
(!IsFaceVisible(facesDatas[currentFaceId], selectedCams)) continue; /* // #ifdef TEXOPT_USE_OPENMP // #pragma omp critical // #endif // std::lock_guard lock(*scene.mesh.invalidFaces.mtx); // if (scene.mesh.invalidFaces.data.find(currentFaceId) != scene.mesh.invalidFaces.data.end()) { // continue; // 跳过无效面 // } // 检查是否被所有选定相机有效看到 if (!IsFaceVisibleAndValid(facesDatas[currentFaceId], selectedCams)) { continue; } //*/ // remove it from remaining faces and add it to the virtual face { const auto posToErase = remainingFaces.FindFirst(currentFaceId); ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); remainingFaces.RemoveAtMove(posToErase); selectedFaces[currentFaceId] = true; virtualFace.push_back(currentFaceId); } // add all new neighbors to the queue const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId]; for (int i = 0; i < 3; ++i) { const FIndex fIdx = ffaces[i]; if (fIdx == NO_ID) continue; if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) { currentVirtualFaceQueue.AddTail(fIdx); queuedFaces.emplace(fIdx); } } } while (!currentVirtualFaceQueue.IsEmpty()); // compute virtual face quality and create virtual face for (IIndex idxView: selectedCams) { FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); virtualFaceData.quality = 0; virtualFaceData.idxView = idxView; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color = Point3f::ZERO; #endif unsigned processedFaces(0); bool bInvalidFacesRelative = false; int invalidCount = 0; for (FIndex fid : virtualFace) { const FaceDataArr& faceDatas = facesDatas[fid]; for (FaceData& faceData: faceDatas) { if (faceData.idxView == idxView) { virtualFaceData.quality += faceData.quality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color += faceData.color; #endif ++processedFaces; if (faceData.bInvalidFacesRelative) ++invalidCount; break; } } } ASSERT(processedFaces > 0); virtualFaceData.quality /= processedFaces; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA 
virtualFaceData.color /= processedFaces; #endif virtualFaceData.bInvalidFacesRelative = (invalidCount > processedFaces / 2); } ASSERT(!virtualFaceDatas.empty()); } virtualFacesDatas.emplace_back(std::move(virtualFaceDatas)); virtualFaces.emplace_back(std::move(virtualFace)); } while (!remainingFaces.empty()); } void MeshTexture::CreateVirtualFaces4(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, Mesh::FaceIdxArr& mapFaceToVirtualFace, unsigned minCommonCameras, float thMaxNormalDeviation) { // 初始化数据结构 if (meshCurvatures.empty()) { ComputeFaceCurvatures(); } const float ratioAngleToQuality(0.67f); Mesh::FaceIdxArr remainingFaces(faces.size()); std::iota(remainingFaces.begin(), remainingFaces.end(), 0); std::vector selectedFaces(faces.size(), false); cQueue currentVirtualFaceQueue; std::unordered_set queuedFaces; // 创建面片到虚拟面片的映射(关键修复:确保安全访问) // Mesh::FaceIdxArr mapFaceToVirtualFace(faces.size()); // mapFaceToVirtualFace.Memset(NO_ID); // 初始化为无效值 // 关键参数:限制虚拟面片大小和数量 const size_t MAX_VIRTUAL_FACE_SIZE = 50; const size_t MAX_TOTAL_VIRTUAL_FACES = 5000; // 主循环:创建虚拟面片 while (!remainingFaces.empty() && virtualFaces.size() < MAX_TOTAL_VIRTUAL_FACES) { // 随机选择起始面片 const FIndex startPos = RAND() % remainingFaces.size(); const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; // 关键安全修复:检查面片ID是否在有效范围内 if (virtualFaceCenterFaceID >= faces.size() || virtualFaceCenterFaceID >= meshCurvatures.size() || virtualFaceCenterFaceID >= scene.mesh.faceNormals.size() || virtualFaceCenterFaceID >= facesDatas.size()) { // DEBUG_EXTRA("Warning: Invalid center face ID: %u (max faces: %zu)", // virtualFaceCenterFaceID, faces.size()); remainingFaces.RemoveAtMove(startPos); continue; } // 动态法线阈值 const float centerCurvature = meshCurvatures[virtualFaceCenterFaceID]; const float dynamicThreshold = (centerCurvature < 0.2f) ? 
15.0f : 8.0f; const float dynamicCosTh = COS(FD2R(dynamicThreshold)); const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID]; // 跳过无效中心面片 if (centerFaceDatas.empty()) { // DEBUG_EXTRA("Warning: Center face %u has no view data", virtualFaceCenterFaceID); remainingFaces.RemoveAtMove(startPos); continue; } // 选择公共相机 IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); // 严格筛选有效视图 if (selectedCams.size() < minCommonCameras) { // DEBUG_EXTRA("Warning: Insufficient common cameras for face %u: %u < %u", // virtualFaceCenterFaceID, selectedCams.size(), minCommonCameras); remainingFaces.RemoveAtMove(startPos); continue; } // 创建新虚拟面片 Mesh::FaceIdxArr currentVirtualFace; FaceDataArr currentVirtualFaceDatas; currentVirtualFace.push_back(virtualFaceCenterFaceID); selectedFaces[virtualFaceCenterFaceID] = true; mapFaceToVirtualFace[virtualFaceCenterFaceID] = virtualFaces.size(); currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); queuedFaces.clear(); queuedFaces.insert(virtualFaceCenterFaceID); size_t faceCount = 1; // 扩展虚拟面片 while (!currentVirtualFaceQueue.IsEmpty() && faceCount < MAX_VIRTUAL_FACE_SIZE) { const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); currentVirtualFaceQueue.PopHead(); // 关键安全修复:检查当前面片ID是否在有效范围内 if (currentFaceId >= faces.size() || currentFaceId >= scene.mesh.faceNormals.size() || currentFaceId >= faceFaces.size() || currentFaceId >= facesDatas.size()) { // DEBUG_EXTRA("Warning: Invalid current face ID: %u (max faces: %zu)", // currentFaceId, faces.size()); continue; } // 法线相似性检查 const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; const float dotProduct = normalCenter.dot(faceNormal); if (dotProduct < dynamicCosTh) { continue; } // 视图有效性检查 bool bValidInAllCameras = true; for (IIndex view : selectedCams) { bool found = false; for (const FaceData& fd : 
facesDatas[currentFaceId]) { if (fd.idxView == view && !fd.bInvalidFacesRelative) { found = true; break; } } if (!found) { bValidInAllCameras = false; break; } } if (!bValidInAllCameras) continue; // 添加到虚拟面片 if (!selectedFaces[currentFaceId]) { selectedFaces[currentFaceId] = true; currentVirtualFace.push_back(currentFaceId); mapFaceToVirtualFace[currentFaceId] = virtualFaces.size(); faceCount++; } // 添加邻居面片 const Mesh::FaceFaces& adjFaces = faceFaces[currentFaceId]; for (int i = 0; i < 3; i++) { const FIndex neighborFace = adjFaces[i]; if (neighborFace == NO_ID) continue; // 关键安全修复:检查邻居面片ID是否在有效范围内 if (neighborFace >= faces.size()) { // DEBUG_EXTRA("Warning: Invalid neighbor face ID: %u (max faces: %zu)", // neighborFace, faces.size()); continue; } if (!selectedFaces[neighborFace] && queuedFaces.find(neighborFace) == queuedFaces.end()) { currentVirtualFaceQueue.AddTail(neighborFace); queuedFaces.insert(neighborFace); } } } // 确保虚拟面片足够大 if (currentVirtualFace.size() >= minCommonCameras) { // 创建虚拟面片数据 for (IIndex idxView : selectedCams) { FaceData virtualFaceData; virtualFaceData.quality = 0.0f; virtualFaceData.idxView = idxView; virtualFaceData.bInvalidFacesRelative = false; unsigned processedFaces = 0; for (FIndex fid : currentVirtualFace) { // 关键安全修复:检查面片ID是否在有效范围内 if (fid >= facesDatas.size()) { // DEBUG_EXTRA("Warning: Invalid face ID in virtual face: %u (max: %zu)", // fid, facesDatas.size()); continue; } for (const FaceData& fd : facesDatas[fid]) { if (fd.idxView == idxView && !fd.bInvalidFacesRelative) { virtualFaceData.quality += fd.quality; processedFaces++; break; } } } if (processedFaces > 0) { virtualFaceData.quality /= processedFaces; currentVirtualFaceDatas.push_back(virtualFaceData); } } if (!currentVirtualFaceDatas.empty()) { virtualFacesDatas.push_back(currentVirtualFaceDatas); virtualFaces.push_back(currentVirtualFace); } } // 从剩余面片中移除已处理面片 for (FIndex fid : currentVirtualFace) { const auto pos = remainingFaces.Find(fid); if (pos != 
Mesh::FaceIdxArr::NO_INDEX) { remainingFaces.RemoveAtMove(pos); } } } // 处理剩余面片(简化版) for (FIndex fid : remainingFaces) { // 关键安全修复:检查面片ID是否在有效范围内 if (fid >= faces.size() || fid >= facesDatas.size()) { // DEBUG_EXTRA("Warning: Invalid remaining face ID: %u (max: %zu)", // fid, faces.size()); continue; } const FaceDataArr& faceDatas = facesDatas[fid]; if (faceDatas.empty()) continue; // 创建单面虚拟面片 Mesh::FaceIdxArr singleFace = {fid}; virtualFaces.push_back(singleFace); mapFaceToVirtualFace[fid] = virtualFaces.size() - 1; FaceDataArr singleFaceDatas; for (const FaceData& fd : faceDatas) { if (!fd.bInvalidFacesRelative) { singleFaceDatas.push_back(fd); } } virtualFacesDatas.push_back(singleFaceDatas); } // 关键安全修复:确保每个面片都有有效的虚拟面片映射 for (FIndex fid = 0; fid < faces.size(); fid++) { if (mapFaceToVirtualFace[fid] == NO_ID || mapFaceToVirtualFace[fid] >= virtualFaces.size()) { // 创建默认虚拟面片 Mesh::FaceIdxArr defaultVirtualFace = {fid}; virtualFaces.push_back(defaultVirtualFace); // 创建默认数据 FaceDataArr defaultData; if (fid < facesDatas.size()) { for (const FaceData& fd : facesDatas[fid]) { if (!fd.bInvalidFacesRelative) { defaultData.push_back(fd); } } } virtualFacesDatas.push_back(defaultData); // 更新映射 mapFaceToVirtualFace[fid] = virtualFaces.size() - 1; // DEBUG_EXTRA("Fixed mapping for face %u -> virtual face %zu", // fid, virtualFaces.size() - 1); } } // 最终验证映射关系 size_t validMappings = 0; size_t invalidMappings = 0; for (FIndex fid = 0; fid < faces.size(); fid++) { size_t virtualIdx = mapFaceToVirtualFace[fid]; if (virtualIdx >= virtualFaces.size()) { virtualIdx = 0; // 安全回退值 mapFaceToVirtualFace[fid] = 0; } if (virtualIdx < virtualFaces.size()) { validMappings++; } else { invalidMappings++; // DEBUG_EXTRA("Critical error: Face %u mapped to invalid virtual face %zu", // fid, virtualIdx); } } // DEBUG_EXTRA("Created %zu virtual faces with %zu valid mappings and %zu invalid mappings", // virtualFaces.size(), validMappings, invalidMappings); // 保存映射到成员变量,供后续使用 // 
m_mapFaceToVirtualFace = mapFaceToVirtualFace; } void MeshTexture::CreateVirtualFaces5(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras, float thMaxNormalDeviation) const { if (meshCurvatures.empty()) { ComputeFaceCurvatures(); } const float ratioAngleToQuality(0.67f); const float cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation))); Mesh::FaceIdxArr remainingFaces(faces.size()); std::iota(remainingFaces.begin(), remainingFaces.end(), 0); std::vector selectedFaces(faces.size(), false); cQueue currentVirtualFaceQueue; std::unordered_set queuedFaces; do { const FIndex startPos = RAND() % remainingFaces.size(); const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; // 动态法线阈值 const float centerCurvature = meshCurvatures[virtualFaceCenterFaceID]; const float dynamicThreshold = (centerCurvature < 0.2f) ? 15.0f : 8.0f; // 曲率<0.2为平坦区域 const float dynamicCosTh = COS(FD2R(dynamicThreshold)); ASSERT(currentVirtualFaceQueue.IsEmpty()); const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID]; // select the common cameras Mesh::FaceIdxArr virtualFace; FaceDataArr virtualFaceDatas; if (centerFaceDatas.empty()) { virtualFace.emplace_back(virtualFaceCenterFaceID); selectedFaces[virtualFaceCenterFaceID] = true; const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID); ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); remainingFaces.RemoveAtMove(posToErase); } else { const IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); queuedFaces.clear(); do { const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); currentVirtualFaceQueue.PopHead(); // check for condition to add in current virtual face // normal angle smaller than thMaxNormalDeviation degrees 
const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); // if (cosFaceToCenter < cosMaxNormalDeviation) // continue; if (cosFaceToCenter < dynamicCosTh) // 使用动态阈值 continue; // check if current face is seen by all cameras in selectedCams ASSERT(!selectedCams.empty()); if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams)) continue; /* // #ifdef TEXOPT_USE_OPENMP // #pragma omp critical // #endif // std::lock_guard lock(*scene.mesh.invalidFaces.mtx); // if (scene.mesh.invalidFaces.data.find(currentFaceId) != scene.mesh.invalidFaces.data.end()) { // continue; // 跳过无效面 // } // 检查是否被所有选定相机有效看到 if (!IsFaceVisibleAndValid(facesDatas[currentFaceId], selectedCams)) { continue; } //*/ // remove it from remaining faces and add it to the virtual face { const auto posToErase = remainingFaces.FindFirst(currentFaceId); ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); remainingFaces.RemoveAtMove(posToErase); selectedFaces[currentFaceId] = true; virtualFace.push_back(currentFaceId); } // add all new neighbors to the queue const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId]; for (int i = 0; i < 3; ++i) { const FIndex fIdx = ffaces[i]; if (fIdx == NO_ID) continue; if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) { currentVirtualFaceQueue.AddTail(fIdx); queuedFaces.emplace(fIdx); } } } while (!currentVirtualFaceQueue.IsEmpty()); // compute virtual face quality and create virtual face for (IIndex idxView: selectedCams) { FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); virtualFaceData.quality = 0; virtualFaceData.idxView = idxView; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color = Point3f::ZERO; #endif unsigned processedFaces(0); bool bInvalidFacesRelative = false; int invalidCount = 0; for (FIndex fid : virtualFace) { const FaceDataArr& faceDatas = facesDatas[fid]; for (FaceData& faceData: faceDatas) { /* // if 
(faceData.idxView == idxView) { if (faceData.idxView == idxView && !faceData.bInvalidFacesRelative) { virtualFaceData.quality += faceData.quality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color += faceData.color; #endif ++processedFaces; if (faceData.bInvalidFacesRelative) ++invalidCount; // break; } //*/ /* int nViewCount = 0; if (faceData.idxView == idxView) { for (const FaceData& fd : faceDatas) { if (fd.idxView != idxView) { ++nViewCount; } } if ((nViewCount<=10) || !faceData.bInvalidFacesRelative) { virtualFaceData.quality += faceData.quality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color += faceData.color; #endif ++processedFaces; // break; } } //*/ //* int nViewCount = 0; if (faceData.idxView == idxView) { for (const FaceData& fd : faceDatas) { if ( faceData.bInvalidFacesRelative) { ++nViewCount; } } if (faceData.bInvalidFacesRelative) { } else { virtualFaceData.quality += faceData.quality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color += faceData.color; #endif ++processedFaces; // break; } } //*/ } } ASSERT(processedFaces > 0); virtualFaceData.quality /= processedFaces; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color /= processedFaces; #endif virtualFaceData.bInvalidFacesRelative = (invalidCount > 1); // virtualFaceData.bInvalidFacesRelative = (invalidCount > processedFaces * 2 / 3); } ASSERT(!virtualFaceDatas.empty()); } virtualFacesDatas.emplace_back(std::move(virtualFaceDatas)); virtualFaces.emplace_back(std::move(virtualFace)); } while (!remainingFaces.empty()); } bool MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, std::vector& isVirtualFace, unsigned minCommonCameras, float thMaxNormalDeviation) const { if (meshCurvatures.empty()) { ComputeFaceCurvatures(); } float thMaxColorDeviation = 130.0f; const float ratioAngleToQuality(0.67f); const float 
cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation))); Mesh::FaceIdxArr remainingFaces(faces.size()); std::iota(remainingFaces.begin(), remainingFaces.end(), 0); std::vector selectedFaces(faces.size(), false); cQueue currentVirtualFaceQueue; std::unordered_set queuedFaces; // Precompute average color for each face Colors faceColors; // 创建一个空列表 faceColors.reserve(faces.size()); // 预分配空间(如果cList有reserve方法且您关心性能) for (size_t i = 0; i < faces.size(); ++i) { faceColors.push_back(Color::ZERO); // 逐个添加元素 } for (FIndex idxFace = 0; idxFace < faces.size(); ++idxFace) { const FaceDataArr& faceDatas = facesDatas[idxFace]; if (faceDatas.empty()) continue; Color sumColor = Color::ZERO; for (const FaceData& fd : faceDatas) { sumColor += fd.color; } faceColors[idxFace] = sumColor / faceDatas.size(); } do { const FIndex startPos = RAND() % remainingFaces.size(); const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; // 动态法线阈值 const float centerCurvature = meshCurvatures[virtualFaceCenterFaceID]; const float dynamicThreshold = (centerCurvature < 0.2f) ? 
15.0f : 8.0f; // 曲率<0.2为平坦区域 const float dynamicCosTh = COS(FD2R(dynamicThreshold)); ASSERT(currentVirtualFaceQueue.IsEmpty()); const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID]; // 检查中心面片是否包含无效视图 bool bHasInvalidView = false; int nInvalidViewCount = 0; int nTotalViewCount = 0; for (const FaceData& faceData : centerFaceDatas) { if (faceData.bInvalidFacesRelative) { bHasInvalidView = true; ++nInvalidViewCount; // break; } ++nTotalViewCount; } std::vector> sortedViews; std::vector> sortedLuminViews; std::vector> validViews; sortedViews.reserve(centerFaceDatas.size()); for (const FaceData& fd : centerFaceDatas) { if (fd.bInvalidFacesRelative) { // invalidView = fd.idxView; // invalidQuality = fd.quality; sortedViews.emplace_back(fd.quality, fd.color); sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color); } else { sortedViews.emplace_back(fd.quality, fd.color); sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color); validViews.emplace_back(fd.quality, fd.color); } } std::sort(sortedViews.begin(), sortedViews.end(), [](const auto& a, const auto& b) { return a.first > b.first; }); std::sort(validViews.begin(), validViews.end(), [](const auto& a, const auto& b) { return a.first > b.first; }); int nSize = sortedViews.size(); // int nSize = (sortedViews.size()>1) ? 
1 : sortedViews.size(); // 计算初始平均值 float totalQuality = 0.0f; Color totalColor(0,0,0); for (int n = 0; n < nSize; ++n) { totalQuality += sortedViews[n].first; totalColor += sortedViews[n].second; } const float avgQuality = totalQuality / nSize; const Color avgColor = totalColor / nSize; float totalLuminance = MeshTexture::GetLuminance(totalColor); float avgLuminance = totalLuminance / nSize; std::sort(sortedLuminViews.begin(), sortedLuminViews.end(), [avgLuminance](const auto& a, const auto& b) { float luminDistA = cv::norm(avgLuminance - a.first); float luminDistB = cv::norm(avgLuminance - b.first); return luminDistA < luminDistB; }); // select the common cameras Mesh::FaceIdxArr virtualFace; FaceDataArr virtualFaceDatas; if (centerFaceDatas.empty()) { virtualFace.emplace_back(virtualFaceCenterFaceID); selectedFaces[virtualFaceCenterFaceID] = true; const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID); ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); remainingFaces.RemoveAtMove(posToErase); } else { IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); //* // 获取中心面片的法线 (注意变量名是 normalCenter, 不是 centerNormal) const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; // 过滤selectedCams:只保留夹角小于30度的视图 IIndexArr filteredCams; // 用于存储过滤后的视图索引 for (IIndex idxView : selectedCams) { const Image& imageData = images[idxView]; // 计算相机在世界坐标系中的朝向向量(相机镜面法线) const RMatrix& R = imageData.camera.R; // 请根据 R 的实际类型调整,可能是 Matrix3x3f 或其他 // 相机局部坐标系中的向前向量 (0,0,-1) Point3f localForward(0.0f, 0.0f, -1.0f); // 手动计算矩阵乘法:cameraForward = R * localForward Point3f cameraForward; cameraForward.x = R(0,0) * localForward.x + R(0,1) * localForward.y + R(0,2) * localForward.z; cameraForward.y = R(1,0) * localForward.x + R(1,1) * localForward.y + R(1,2) * localForward.z; cameraForward.z = R(2,0) * localForward.x + R(2,1) * localForward.y + R(2,2) * localForward.z; // 手动归一化 cameraForward(因为 
Point3f 可能没有 normalize() 成员函数) float norm = std::sqrt(cameraForward.x * cameraForward.x + cameraForward.y * cameraForward.y + cameraForward.z * cameraForward.z); if (norm > 0.0f) { cameraForward.x /= norm; cameraForward.y /= norm; cameraForward.z /= norm; } else { // 处理零向量的情况,赋予默认值 cameraForward = Point3f(0, 0, -1); } // 计算夹角余弦值 - 使用已声明的 normalCenter // 假设 Normal 类型可以隐式转换为 Point3f,或进行显式转换 Point3f normalPoint(normalCenter.x, normalCenter.y, normalCenter.z); // 显式转换示例 float cosAngle = cameraForward.dot(normalPoint); // 使用正确的变量名 normalPoint(由 normalCenter 转换而来) float angleDeg = std::acos(cosAngle) * 180.0f / M_PI; // 将弧度转换为角度 std::string strPath = imageData.name; size_t lastSlash = strPath.find_last_of("/\\"); if (lastSlash == std::string::npos) lastSlash = 0; // 若无分隔符,从头开始 else lastSlash++; // 跳过分隔符 // 查找扩展名分隔符 '.' 的位置 size_t lastDot = strPath.find_last_of('.'); if (lastDot == std::string::npos) lastDot = strPath.size(); // 若无扩展名,截到末尾 // 截取文件名(不含路径和扩展名) std::string strName = strPath.substr(lastSlash, lastDot - lastSlash); // printf("CreateVirtualFace %s, %d\n", strName.c_str(), virtualFaceCenterFaceID); if (!scene.is_face_delete_edge(strName, virtualFaceCenterFaceID)) { if (scene.is_face_edge(strName, virtualFaceCenterFaceID)) { // printf("CreateVirtualFace %s, %d, %f\n", strName.c_str(), virtualFaceCenterFaceID, angleLimit); if (angleDeg <= 45.0f) { filteredCams.push_back(idxView); } } else { filteredCams.push_back(idxView); } } } // 确保 selectedCams 是非 const 的,才能对其进行赋值 // 例如,其声明应为:IIndexArr selectedCams = ...; (不能是 const IIndexArr) if (filteredCams.empty()) { // 处理所有视图都被过滤的情况... 
// DEBUG_EXTRA("Warning: All views filtered for virtual face due to angle condition."); // selectedCams = SelectBestView(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); selectedCams = filteredCams; isVirtualFace[virtualFaceCenterFaceID] = false; } else { selectedCams = filteredCams; isVirtualFace[virtualFaceCenterFaceID] = true; } //*/ currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); queuedFaces.clear(); do { const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); currentVirtualFaceQueue.PopHead(); // check for condition to add in current virtual face // normal angle smaller than thMaxNormalDeviation degrees const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); // if (cosFaceToCenter < cosMaxNormalDeviation) // continue; if (cosFaceToCenter < dynamicCosTh) // 使用动态阈值 continue; // check if current face is seen by all cameras in selectedCams ASSERT(!selectedCams.empty()); if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams)) continue; // Check color similarity const Color& centerColor = faceColors[virtualFaceCenterFaceID]; const Color& currentColor = faceColors[currentFaceId]; // if (cv::norm(centerColor) > 1e-5 && cv::norm(currentColor) > 1e-5) { float colorDistance = cv::norm(centerColor - currentColor); // printf("1colorDistance=%f\n", colorDistance); if (colorDistance > thMaxColorDeviation) { // printf("2colorDistance=%f\n", colorDistance); // continue; // Skip if color difference is too large } } /* // #ifdef TEXOPT_USE_OPENMP // #pragma omp critical // #endif // std::lock_guard lock(*scene.mesh.invalidFaces.mtx); // if (scene.mesh.invalidFaces.data.find(currentFaceId) != scene.mesh.invalidFaces.data.end()) { // continue; // 跳过无效面 // } // 检查是否被所有选定相机有效看到 if (!IsFaceVisibleAndValid(facesDatas[currentFaceId], selectedCams)) { continue; } //*/ // remove it from remaining faces and add it to the virtual face { const 
auto posToErase = remainingFaces.FindFirst(currentFaceId); ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); remainingFaces.RemoveAtMove(posToErase); selectedFaces[currentFaceId] = true; virtualFace.push_back(currentFaceId); } // add all new neighbors to the queue const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId]; for (int i = 0; i < 3; ++i) { const FIndex fIdx = ffaces[i]; if (fIdx == NO_ID) continue; if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) { currentVirtualFaceQueue.AddTail(fIdx); queuedFaces.emplace(fIdx); } } } while (!currentVirtualFaceQueue.IsEmpty()); /* if (selectedCams.empty()) { const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color; const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality; FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); virtualFaceData.color = medianColor; virtualFaceData.quality = medianQuality; } */ // compute virtual face quality and create virtual face for (IIndex idxView: selectedCams) { FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); virtualFaceData.quality = 0; virtualFaceData.idxView = idxView; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color = Point3f::ZERO; #endif int invalidQuality = 0; Color invalidColor = Point3f::ZERO; unsigned processedFaces(0); bool bInvalidFacesRelative = false; int invalidCount = 0; for (FIndex fid : virtualFace) { const FaceDataArr& faceDatas = facesDatas[fid]; for (FaceData& faceData: faceDatas) { /* if (faceData.idxView == idxView) { // if (faceData.idxView == idxView && !faceData.bInvalidFacesRelative) { virtualFaceData.quality += faceData.quality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color += faceData.color; #endif ++processedFaces; if (faceData.bInvalidFacesRelative) ++invalidCount; break; } //*/ /* int nViewCount = 0; if (faceData.idxView == idxView) { for (const FaceData& fd : faceDatas) { if (fd.idxView != idxView) { ++nViewCount; } } if 
((nViewCount<=10) || !faceData.bInvalidFacesRelative) { virtualFaceData.quality += faceData.quality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color += faceData.color; #endif ++processedFaces; // break; } } //*/ //* int nViewCount = 0; if (faceData.idxView == idxView) { for (const FaceData& fd : faceDatas) { if ( faceData.bInvalidFacesRelative) { ++nViewCount; } } // if (faceData.bInvalidFacesRelative) if (bHasInvalidView) { // invalidQuality += faceData.quality; // #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA // invalidColor += faceData.color; // #endif ++processedFaces; } else { // virtualFaceData.quality += faceData.quality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA // virtualFaceData.color += faceData.color; #endif ++processedFaces; // break; } } //*/ } } float maxLuminance = 120.0f; float minLuminance = 90.0f; int validViewsSize = validViews.size(); // bHasInvalidView = true; if (bHasInvalidView) { // 使用鲁棒的统计方法计算颜色和亮度的中心值 const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color; const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality; const float medianLuminance = ComputeMedianLuminance(sortedViews); // 计算颜色和亮度的绝对中位差(MAD)作为偏差阈值 const float colorMAD = ComputeColorMAD(sortedViews, medianColor); const float luminanceMAD = ComputeLuminanceMAD(sortedViews, medianLuminance); // 基于MAD设置动态阈值(3倍MAD是统计学上常用的异常值阈值) const float maxColorDeviation = 0.01f * colorMAD; const float maxLuminanceDeviation = 0.01f * luminanceMAD; std::vector validIndices; for (int n = 0; n < sortedViews.size(); ++n) { const Color& viewColor = sortedViews[n].second; const float viewLuminance = MeshTexture::GetLuminance(viewColor); const float colorDistance = cv::norm(viewColor - medianColor); const float luminanceDistance = std::abs(viewLuminance - medianLuminance); if (colorDistance <= maxColorDeviation && luminanceDistance <= maxLuminanceDeviation) { validIndices.push_back(n); } else { const FIndex currentFaceId = 
currentVirtualFaceQueue.GetHead(); const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); bool bColorSimilarity = true; // Check color similarity const Color& centerColor = faceColors[virtualFaceCenterFaceID]; const Color& currentColor = faceColors[currentFaceId]; float colorDistance = cv::norm(centerColor - currentColor); // printf("1colorDistance=%f\n", colorDistance); if (colorDistance > thMaxColorDeviation) { // printf("2colorDistance=%f\n", colorDistance); bColorSimilarity = false; } // if ((cosFaceToCenter 3) ? 3 : sortedViews.size(); // // 计算初始平均值 // float totalQuality = 0.0f; // Color totalColor(0,0,0); // for (int n = 0; n < nSize; ++n) { // totalQuality += sortedViews[n].first; // totalColor += sortedViews[n].second; // } // const float avgQuality = totalQuality / nSize; // const Color avgColor = totalColor / nSize; // 过滤偏差过大的视图 // std::vector validIndices; float maxColorDeviation = 0.01f; // 颜色偏差阈值 float maxLuminanceDeviation = 0.01f; for (int n = 0; n < nSize; ++n) { const Color& viewColor = sortedViews[n].second; float colorDistance = cv::norm(avgColor - viewColor); // printf("colorDistance=%f\n", colorDistance); float viewLuminance = MeshTexture::GetLuminance(viewColor); float luminanceDistance = cv::norm(avgLuminance - viewLuminance); // printf("viewLuminance=%f\n", viewLuminance); // if ((colorDistance<=maxColorDeviation)&& // (viewLuminance<=maxLuminance)&& // (viewLuminance>=minLuminance)){ if ((colorDistance <= maxColorDeviation) && (luminanceDistance <= maxLuminanceDeviation)) { // validIndices.push_back(n); } } //* if (validIndices.empty()) { for (int n = 0; n < nSize; ++n) { const Color& viewColor = sortedViews[n].second; float viewLuminance = MeshTexture::GetLuminance(viewColor); float luminanceDistance = cv::norm(avgLuminance - viewLuminance); if (luminanceDistance <= maxLuminanceDeviation){ // validIndices.push_back(n); } } } if 
(validIndices.empty()) { for (int n = 0; n < nSize; ++n) { const Color& viewColor = sortedViews[n].second; float colorDistance = cv::norm(avgColor - viewColor); if (colorDistance<=maxColorDeviation){ // validIndices.push_back(n); } } } //*/ /* float maxColorDeviation2 = 0.05f; if (validIndices.empty()) { for (int n = 0; n < nSize; ++n) { const Color& viewColor = sortedViews[n].second; float colorDistance = cv::norm(avgColor - viewColor); if (colorDistance <= maxColorDeviation2) { validIndices.push_back(n); } } } //*/ /* float totalLuminance = MeshTexture::GetLuminance(totalColor); float avgLuminance = totalLuminance / nSize; for (int n = 0; n < nSize; ++n) { const Color& viewColor = sortedViews[n].second; float viewLuminance = MeshTexture::GetLuminance(viewColor); float luminanceDistance = cv::norm(avgLuminance - viewLuminance); if (luminanceDistance <= maxLuminanceDeviation) { validIndices.push_back(n); } } //*/ // 如果所有视图都被排除,保留原始平均值 if (validIndices.empty()) { // virtualFaceData.quality = avgQuality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA // virtualFaceData.color = avgColor; #endif // virtualFaceData.quality = avgQuality; // virtualFaceData.color = sortedLuminViews[0].second; virtualFaceData.quality = medianQuality; virtualFaceData.color = medianColor; } else { // 使用过滤后的视图重新计算平均值 float totalQuality2 = 0.0f; Color totalColor2 = Color(0,0,0); for (int idx : validIndices) { const Color& viewColor = sortedViews[idx].second; float colorDistance = cv::norm(avgColor - viewColor); float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation); totalQuality2 += sortedViews[idx].first; totalColor2 += sortedViews[idx].second * weight; } virtualFaceData.quality = totalQuality2 / validIndices.size(); virtualFaceData.color = totalColor2 / validIndices.size(); } //*/ } else if (validViewsSize>0&&validViewsSize<=2&&false) { /* virtualFaceData.quality = 0; virtualFaceData.color = Point3f::ZERO; // int nSize = (validViews.size()>1) ? 
1 : validViews.size(); int nSize = validViews.size(); for (int n=0; n 3) ? 3 : validViews.size(); // 计算初始平均值 float totalQuality2 = 0.0f; Color totalColor2(0,0,0); for (int n = 0; n < nSize; ++n) { totalQuality2 += validViews[n].first; totalColor2 += validViews[n].second; } const float avgQuality2 = totalQuality2 / nSize; const Color avgColor2 = totalColor2 / nSize; // 过滤偏差过大的视图 // std::vector validIndices; float maxColorDeviation = 0.01f; // 颜色偏差阈值 for (int n = 0; n < nSize; ++n) { const Color& viewColor = validViews[n].second; float colorDistance = cv::norm(avgColor2 - viewColor); // printf("colorDistance=%f\n", colorDistance); float viewLuminance = MeshTexture::GetLuminance(viewColor); if ((colorDistance<=maxColorDeviation)&& (viewLuminance<=120.0f)){ // if (colorDistance <= maxColorDeviation) { // validIndices.push_back(n); } } /* // float totalLuminance = MeshTexture::GetLuminance(totalColor); // float avgLuminance = totalLuminance / nSize; float maxLuminanceDeviation = 0.01f; for (int n = 0; n < nSize; ++n) { const Color& viewColor = sortedViews[n].second; float viewLuminance = MeshTexture::GetLuminance(viewColor); float luminanceDistance = cv::norm(avgLuminance - viewLuminance); // printf("luminanceDistance=%f\n", luminanceDistance); if (luminanceDistance <= maxLuminanceDeviation) { // validIndices.push_back(n); } } //*/ // 如果所有视图都被排除,保留原始平均值 if (validIndices.empty()) { // virtualFaceData.quality = avgQuality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA // virtualFaceData.color = avgColor; #endif virtualFaceData.quality = medianQuality; virtualFaceData.color = medianColor; // virtualFaceData.color = sortedLuminViews[0].second; /* for (int n = 0; n < nSize; ++n) { float lumin = sortedLuminViews[n].first; if (lumin>=minLuminance&&lumin<=maxLuminance) { // virtualFaceData.quality = avgQuality; // virtualFaceData.color = sortedLuminViews[0].second; break; } } //*/ } else { // 使用过滤后的视图重新计算平均值 float totalQuality2 = 0.0f; Color totalColor2 = Color(0,0,0); for 
(int idx : validIndices) { const Color& viewColor = sortedViews[idx].second; float colorDistance = cv::norm(avgColor - viewColor); float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation); totalQuality2 += validViews[idx].first; totalColor2 += validViews[idx].second * weight; } virtualFaceData.quality = totalQuality2 / validIndices.size(); virtualFaceData.color = totalColor2 / validIndices.size(); } //*/ } else { //* ASSERT(processedFaces > 0); // virtualFaceData.quality /= processedFaces; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA // virtualFaceData.color /= processedFaces; #endif virtualFaceData.quality = 0; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color = Point3f::ZERO; #endif //*/ /* // 如果所有视图都被排除,保留原始平均值 if (validIndices.empty() || validViews.size() <= 0) { // virtualFaceData.quality = avgQuality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA // virtualFaceData.color = avgColor; #endif // virtualFaceData.quality = medianQuality; // virtualFaceData.color = medianColor; virtualFaceData.quality /= processedFaces; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color /= processedFaces; #endif } else { // 使用过滤后的视图重新计算平均值 float totalQuality2 = 0.0f; Color totalColor2 = Color(0,0,0); for (int idx : validIndices) { const Color& viewColor = sortedViews[idx].second; float colorDistance = cv::norm(avgColor - viewColor); float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation); totalQuality2 += validViews[idx].first; totalColor2 += validViews[idx].second * weight; } virtualFaceData.quality = totalQuality2 / validIndices.size(); virtualFaceData.color = totalColor2 / validIndices.size(); } //*/ } } else { // 使用鲁棒的统计方法计算颜色和亮度的中心值 const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color; const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality; const float medianLuminance = ComputeMedianLuminance(sortedViews); // 计算颜色和亮度的绝对中位差(MAD)作为偏差阈值 const float colorMAD = 
ComputeColorMAD(sortedViews, medianColor);
					const float luminanceMAD = ComputeLuminanceMAD(sortedViews, medianLuminance);
					// set dynamic thresholds from the MAD (3x MAD is the usual statistical outlier threshold)
					const float maxColorDeviation = 0.01f * colorMAD;
					const float maxLuminanceDeviation = 0.01f * luminanceMAD;
					// NOTE(review): template arguments appear to have been stripped from this file
					// (e.g. 'std::vector validIndices'); restore them from version control before compiling.
					std::vector validIndices;
					for (int n = 0; n < sortedViews.size(); ++n) {
						const Color& viewColor = sortedViews[n].second;
						const float viewLuminance = MeshTexture::GetLuminance(viewColor);
						const float colorDistance = cv::norm(viewColor - medianColor);
						const float luminanceDistance = std::abs(viewLuminance - medianLuminance);
						// if (colorDistance <= maxColorDeviation &&
						// luminanceDistance <= maxLuminanceDeviation)
						{
							validIndices.push_back(n);
						}
					}
					if (validIndices.empty()) {
						virtualFaceData.quality = medianQuality;
						virtualFaceData.color = medianColor;
					} else {
						// recompute the average using only the filtered views
						// NOTE(review): 'validIndices' holds indices into 'sortedViews', but they are
						// used to index 'validViews' which can be smaller — possible out-of-bounds; verify.
						float totalQuality2 = 0.0f;
						Color totalColor2 = Color(0,0,0);
						for (int idx : validIndices) {
							totalQuality2 += validViews[idx].first;
							totalColor2 += validViews[idx].second;
						}
						virtualFaceData.quality = totalQuality2 / validIndices.size();
						virtualFaceData.color = totalColor2 / validIndices.size();
					}
				}
				// virtualFaceData.bInvalidFacesRelative = (invalidCount > 1);
				// virtualFaceData.bInvalidFacesRelative = (invalidCount > processedFaces * 2 / 3);
			}
			ASSERT(!virtualFaceDatas.empty());
		}
		virtualFacesDatas.emplace_back(std::move(virtualFaceDatas));
		virtualFaces.emplace_back(std::move(virtualFace));
	} while (!remainingFaces.empty());
	return true;
}

// Group mesh faces into "virtual faces": contiguous patches whose faces share a
// compatible normal (dynamic, curvature-based threshold) and are seen by a common
// set of cameras selected around a randomly picked center face; for each patch one
// FaceData per selected camera is emitted with a robustly (median/MAD) filtered
// color and quality.
//  - facesDatas: per-face per-view data (view index, color, quality)
//  - virtualFacesDatas: output, aggregated view data per virtual face
//  - virtualFaces: output, the face indices composing each virtual face
//  - isVirtualFace: output flags (only written by the currently disabled
//    camera-angle filtering section below)
//  - minCommonCameras: minimum number of cameras common to the whole patch
//  - thMaxNormalDeviation: max normal deviation in degrees (partially superseded
//    by the dynamic curvature-based threshold computed per center face)
bool MeshTexture::CreateVirtualFaces7(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, std::vector& isVirtualFace, unsigned minCommonCameras, float thMaxNormalDeviation) const
{
	if (meshCurvatures.empty()) {
		ComputeFaceCurvatures();
	}
	float thMaxColorDeviation = 130.0f;
	const float ratioAngleToQuality(0.67f);
	const float cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation)));
	// all faces start as "remaining"; patches are carved out of this set
	Mesh::FaceIdxArr remainingFaces(faces.size());
	std::iota(remainingFaces.begin(), remainingFaces.end(), 0);
	std::vector selectedFaces(faces.size(), false);
	cQueue currentVirtualFaceQueue;
	std::unordered_set queuedFaces;
	// Precompute average color for each face
	Colors faceColors; // create an empty list
	faceColors.reserve(faces.size()); // pre-allocate (if cList has a reserve method and performance matters)
	for (size_t i = 0; i < faces.size(); ++i) {
		faceColors.push_back(Color::ZERO); // append elements one by one
	}
	for (FIndex idxFace = 0; idxFace < faces.size(); ++idxFace) {
		const FaceDataArr& faceDatas = facesDatas[idxFace];
		if (faceDatas.empty())
			continue;
		Color sumColor = Color::ZERO;
		for (const FaceData& fd : faceDatas) {
			sumColor += fd.color;
		}
		faceColors[idxFace] = sumColor / faceDatas.size();
	}
	do {
		// pick a random remaining face as the center of a new virtual face
		const FIndex startPos = RAND() % remainingFaces.size();
		const FIndex virtualFaceCenterFaceID = remainingFaces[startPos];
		// dynamic normal threshold
		const float centerCurvature = meshCurvatures[virtualFaceCenterFaceID];
		const float dynamicThreshold = (centerCurvature < 0.2f) ? 15.0f : 8.0f; // curvature < 0.2 counts as a flat region
		const float dynamicCosTh = COS(FD2R(dynamicThreshold));
		ASSERT(currentVirtualFaceQueue.IsEmpty());
		const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID];
		const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID];
		// check whether the center face is seen by any "invalid" view
		bool bHasInvalidView = false;
		int nInvalidViewCount = 0;
		int nTotalViewCount = 0;
		for (const FaceData& faceData : centerFaceDatas) {
			if (faceData.bInvalidFacesRelative) {
				bHasInvalidView = true;
				++nInvalidViewCount;
				// break;
			}
			++nTotalViewCount;
		}
		// NOTE(review): template arguments stripped here too ('std::vector>' etc.)
		std::vector> sortedViews;
		std::vector> sortedLuminViews;
		std::vector> validViews;
		sortedViews.reserve(centerFaceDatas.size());
		for (const FaceData& fd : centerFaceDatas) {
			if (fd.bInvalidFacesRelative) {
				// invalidView = fd.idxView;
				// invalidQuality = fd.quality;
				sortedViews.emplace_back(fd.quality, fd.color);
				sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color);
			} else {
				sortedViews.emplace_back(fd.quality, fd.color);
				sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color);
				validViews.emplace_back(fd.quality, fd.color);
			}
		}
		// sort views by decreasing quality
		std::sort(sortedViews.begin(), sortedViews.end(), [](const auto& a, const auto& b) { return a.first > b.first; });
		std::sort(validViews.begin(), validViews.end(), [](const auto& a, const auto& b) { return a.first > b.first; });
		int nSize = sortedViews.size();
		// int nSize = (sortedViews.size()>1) ? 1 : sortedViews.size();
		// compute initial averages
		// NOTE(review): if centerFaceDatas is empty, nSize is 0 and the divisions below divide by zero — verify.
		float totalQuality = 0.0f;
		Color totalColor(0,0,0);
		for (int n = 0; n < nSize; ++n) {
			totalQuality += sortedViews[n].first;
			totalColor += sortedViews[n].second;
		}
		const float avgQuality = totalQuality / nSize;
		const Color avgColor = totalColor / nSize;
		float totalLuminance = MeshTexture::GetLuminance(totalColor);
		float avgLuminance = totalLuminance / nSize;
		// sort views by closeness of their luminance to the average luminance
		std::sort(sortedLuminViews.begin(), sortedLuminViews.end(), [avgLuminance](const auto& a, const auto& b) {
			float luminDistA = cv::norm(avgLuminance - a.first);
			float luminDistB = cv::norm(avgLuminance - b.first);
			return luminDistA < luminDistB;
		});
		// select the common cameras
		Mesh::FaceIdxArr virtualFace;
		FaceDataArr virtualFaceDatas;
		if (centerFaceDatas.empty()) {
			// face seen by no camera: it forms a virtual face on its own
			virtualFace.emplace_back(virtualFaceCenterFaceID);
			selectedFaces[virtualFaceCenterFaceID] = true;
			const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID);
			ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX);
			remainingFaces.RemoveAtMove(posToErase);
		} else {
			IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality);
			/* -- disabled in this variant (active in the sibling variant above): filter the
			      selected cameras by viewing angle and edge-face constraints --
			// fetch the center face normal (note the variable is named normalCenter, not centerNormal)
			const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID];
			// filter selectedCams: keep only views whose angle is below the limit (30 degrees)
			IIndexArr filteredCams; // stores the filtered view indices
			for (IIndex idxView : selectedCams) {
				const Image& imageData = images[idxView];
				// compute the camera viewing direction in world coordinates (camera mirror normal)
				const RMatrix& R = imageData.camera.R; // adjust to R's actual type; may be Matrix3x3f or similar
				// forward vector in the camera's local frame (0,0,-1)
				Point3f localForward(0.0f, 0.0f, -1.0f);
				// manually compute the matrix product: cameraForward = R * localForward
				Point3f cameraForward;
				cameraForward.x = R(0,0) * localForward.x + R(0,1) * localForward.y + R(0,2) * localForward.z;
				cameraForward.y = R(1,0) * localForward.x + R(1,1) * localForward.y + R(1,2) * localForward.z;
				cameraForward.z = R(2,0) * localForward.x + R(2,1) * localForward.y + R(2,2) * localForward.z;
				// manually normalize cameraForward (Point3f may not have a normalize() member)
				float norm = std::sqrt(cameraForward.x * cameraForward.x + cameraForward.y * cameraForward.y + cameraForward.z * cameraForward.z);
				if (norm > 0.0f) {
					cameraForward.x /= norm;
					cameraForward.y /= norm;
					cameraForward.z /= norm;
				} else {
					// handle the zero-vector case by assigning a default value
					cameraForward = Point3f(0, 0, -1);
				}
				// compute the cosine of the angle, using the already declared normalCenter
				// assume Normal converts implicitly to Point3f, or convert explicitly
				Point3f normalPoint(normalCenter.x, normalCenter.y, normalCenter.z); // explicit conversion example
				float cosAngle = cameraForward.dot(normalPoint); // use normalPoint (converted from normalCenter)
				float angleDeg = std::acos(cosAngle) * 180.0f / M_PI; // radians to degrees
				std::string strPath = imageData.name;
				size_t lastSlash = strPath.find_last_of("/\\");
				if (lastSlash == std::string::npos) lastSlash = 0; // no separator: start from the beginning
				else lastSlash++; // skip the separator
				// find the position of the extension separator '.'
				size_t lastDot = strPath.find_last_of('.');
				if (lastDot == std::string::npos) lastDot = strPath.size(); // no extension: take up to the end
				// extract the file name (without path and extension)
				std::string strName = strPath.substr(lastSlash, lastDot - lastSlash);
				// printf("CreateVirtualFace %s, %d\n", strName.c_str(), virtualFaceCenterFaceID);
				if (!scene.is_face_delete_edge(strName, virtualFaceCenterFaceID)) {
					if (scene.is_face_edge(strName, virtualFaceCenterFaceID)) {
						// printf("CreateVirtualFace %s, %d, %f\n", strName.c_str(), virtualFaceCenterFaceID, angleLimit);
						if (angleDeg <= 45.0f) {
							filteredCams.push_back(idxView);
						}
					} else {
						filteredCams.push_back(idxView);
					}
				}
			}
			// selectedCams must be non-const for the assignment below
			// i.e. declared as: IIndexArr selectedCams = ...; (not const IIndexArr)
			if (filteredCams.empty()) {
				// handle the case where all views were filtered out...
				// DEBUG_EXTRA("Warning: All views filtered for virtual face due to angle condition.");
				// selectedCams = SelectBestView(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality);
				selectedCams = filteredCams;
				isVirtualFace[virtualFaceCenterFaceID] = false;
			} else {
				selectedCams = filteredCams;
				isVirtualFace[virtualFaceCenterFaceID] = true;
			}
			//*/
			// grow the patch by BFS from the center face
			currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID);
			queuedFaces.clear();
			do {
				const FIndex currentFaceId = currentVirtualFaceQueue.GetHead();
				currentVirtualFaceQueue.PopHead();
				// check for condition to add in current virtual face
				// normal angle smaller than thMaxNormalDeviation degrees
				const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId];
				const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr()));
				// if (cosFaceToCenter < cosMaxNormalDeviation)
				// continue;
				if (cosFaceToCenter < dynamicCosTh) // use the dynamic threshold
					continue;
				// check if current face is seen by all cameras in selectedCams
				ASSERT(!selectedCams.empty());
				if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams))
					continue;
				// Check color similarity
				const Color& centerColor = faceColors[virtualFaceCenterFaceID];
				const Color& currentColor = faceColors[currentFaceId];
				// if (cv::norm(centerColor) > 1e-5 && cv::norm(currentColor) > 1e-5)
				{
					float colorDistance = cv::norm(centerColor - currentColor);
					// printf("1colorDistance=%f\n", colorDistance);
					if (colorDistance > thMaxColorDeviation) {
						// printf("2colorDistance=%f\n", colorDistance);
						// continue; // Skip if color difference is too large
					}
				}
				/* -- disabled: invalid-face rejection --
				// #ifdef TEXOPT_USE_OPENMP
				// #pragma omp critical
				// #endif
				// std::lock_guard lock(*scene.mesh.invalidFaces.mtx);
				// if (scene.mesh.invalidFaces.data.find(currentFaceId) != scene.mesh.invalidFaces.data.end()) {
				// continue; // skip invalid faces
				// }
				// check the face is validly seen by all selected cameras
				if (!IsFaceVisibleAndValid(facesDatas[currentFaceId], selectedCams)) {
					continue;
				}
				//*/
				// remove it from remaining faces and add it to the virtual face
				{
					const auto posToErase = remainingFaces.FindFirst(currentFaceId);
					ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX);
					remainingFaces.RemoveAtMove(posToErase);
					selectedFaces[currentFaceId] = true;
					virtualFace.push_back(currentFaceId);
				}
				// add all new neighbors to the queue
				const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId];
				for (int i = 0; i < 3; ++i) {
					const FIndex fIdx = ffaces[i];
					if (fIdx == NO_ID)
						continue;
					if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) {
						currentVirtualFaceQueue.AddTail(fIdx);
						queuedFaces.emplace(fIdx);
					}
				}
			} while (!currentVirtualFaceQueue.IsEmpty());
			/*
			if (selectedCams.empty()) {
				const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color;
				const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality;
				FaceData& virtualFaceData = virtualFaceDatas.emplace_back();
				virtualFaceData.color = medianColor;
				virtualFaceData.quality = medianQuality;
			}
			*/
			// compute virtual face quality and create virtual face
			for (IIndex idxView: selectedCams) {
				FaceData& virtualFaceData = virtualFaceDatas.emplace_back();
				virtualFaceData.quality = 0;
				virtualFaceData.idxView = idxView;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
				virtualFaceData.color = Point3f::ZERO;
#endif
				int invalidQuality = 0;
				Color invalidColor = Point3f::ZERO;
				unsigned processedFaces(0);
				bool bInvalidFacesRelative = false;
				int invalidCount = 0;
				// NOTE(review): all three accumulation variants below are commented out in this
				// variant, so processedFaces stays 0; the ASSERT(processedFaces > 0) in the final
				// else-branch would fire if that branch were ever reached — verify intent.
				for (FIndex fid : virtualFace) {
					const FaceDataArr& faceDatas = facesDatas[fid];
					for (FaceData& faceData: faceDatas) {
						/* -- disabled accumulation, variant 1 --
						// if (faceData.idxView == idxView) {
						if (faceData.idxView == idxView && !faceData.bInvalidFacesRelative) {
							virtualFaceData.quality += faceData.quality;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
							virtualFaceData.color += faceData.color;
#endif
							++processedFaces;
							if (faceData.bInvalidFacesRelative)
								++invalidCount;
							break;
						}
						//*/
						/* -- disabled accumulation, variant 2 --
						int nViewCount = 0;
						if (faceData.idxView == idxView) {
							for (const FaceData& fd : faceDatas) {
								if (fd.idxView != idxView) {
									++nViewCount;
								}
							}
							if ((nViewCount<=10) || !faceData.bInvalidFacesRelative) {
								virtualFaceData.quality += faceData.quality;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
								virtualFaceData.color += faceData.color;
#endif
								++processedFaces;
								// break;
							}
						}
						//*/
						/* -- disabled accumulation, variant 3 --
						int nViewCount = 0;
						if (faceData.idxView == idxView) {
							for (const FaceData& fd : faceDatas) {
								if ( faceData.bInvalidFacesRelative) {
									++nViewCount;
								}
							}
							// if (faceData.bInvalidFacesRelative)
							if (bHasInvalidView) {
								// invalidQuality += faceData.quality;
								// #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
								// invalidColor += faceData.color;
								// #endif
								++processedFaces;
							} else {
								// virtualFaceData.quality += faceData.quality;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
								// virtualFaceData.color += faceData.color;
#endif
								++processedFaces;
								// break;
							}
						}
						//*/
					}
				}
				float maxLuminance = 120.0f;
				float minLuminance = 90.0f;
				int validViewsSize = validViews.size();
				bHasInvalidView = true; // NOTE(review): unconditionally forces the branch below; the sibling variant keeps this commented out — verify
				if (bHasInvalidView) {
					// use robust statistics for the central color and luminance values
					const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color;
					const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality;
					const float medianLuminance = ComputeMedianLuminance(sortedViews);
					// compute the median absolute deviation (MAD) of color and luminance as deviation thresholds
					const float colorMAD = ComputeColorMAD(sortedViews, medianColor);
					const float luminanceMAD = ComputeLuminanceMAD(sortedViews, medianLuminance);
					// set dynamic thresholds from the MAD (3x MAD is the usual statistical outlier threshold)
					const float maxColorDeviation = 0.01f * colorMAD;
					const float maxLuminanceDeviation = 0.01f * luminanceMAD;
					std::vector validIndices;
					for (int n = 0; n < sortedViews.size(); ++n) {
						const Color& viewColor = sortedViews[n].second;
						const float viewLuminance = MeshTexture::GetLuminance(viewColor);
						const float colorDistance = cv::norm(viewColor - medianColor);
						const float luminanceDistance = std::abs(viewLuminance - medianLuminance);
						if (colorDistance <= maxColorDeviation &&
							luminanceDistance <= maxLuminanceDeviation) {
							validIndices.push_back(n);
						} else {
							// NOTE(review): currentVirtualFaceQueue was already drained by the BFS
							// loop above, so GetHead() here reads an empty queue — looks like a bug; verify.
							const FIndex currentFaceId = currentVirtualFaceQueue.GetHead();
							const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId];
							const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr()));
							bool bColorSimilarity = true;
							// Check color similarity
							const Color& centerColor = faceColors[virtualFaceCenterFaceID];
							const Color& currentColor = faceColors[currentFaceId];
							float colorDistance = cv::norm(centerColor - currentColor);
							// printf("1colorDistance=%f\n", colorDistance);
							if (colorDistance > thMaxColorDeviation) {
								// printf("2colorDistance=%f\n", colorDistance);
								bColorSimilarity = false;
							}
							// NOTE(review): the next line is garbled — an angle-bracket-delimited span
							// (condition body, closing braces, and an 'int nSize = (...' statement) was
							// stripped from the file, leaving the brace structure here unbalanced.
							// Restore this region from version control.
							// if ((cosFaceToCenter 3) ? 3 : sortedViews.size();
							// // compute initial averages
							// float totalQuality = 0.0f;
							// Color totalColor(0,0,0);
							// for (int n = 0; n < nSize; ++n) {
							// totalQuality += sortedViews[n].first;
							// totalColor += sortedViews[n].second;
							// }
							// const float avgQuality = totalQuality / nSize;
							// const Color avgColor = totalColor / nSize;
							// filter out views with excessive deviation
							// std::vector validIndices;
							float maxColorDeviation = 0.01f; // color deviation threshold
							float maxLuminanceDeviation = 0.01f;
							for (int n = 0; n < nSize; ++n) {
								const Color& viewColor = sortedViews[n].second;
								float colorDistance = cv::norm(avgColor - viewColor);
								// printf("colorDistance=%f\n", colorDistance);
								float viewLuminance = MeshTexture::GetLuminance(viewColor);
								float luminanceDistance = cv::norm(avgLuminance - viewLuminance);
								// printf("viewLuminance=%f\n", viewLuminance);
								// if ((colorDistance<=maxColorDeviation)&&
								// (viewLuminance<=maxLuminance)&&
								// (viewLuminance>=minLuminance)){
								if ((colorDistance <= maxColorDeviation) &&
									(luminanceDistance <= maxLuminanceDeviation)) {
									// validIndices.push_back(n);
								}
							}
							//* fallback 1: relax to luminance-only, then color-only filtering
							if (validIndices.empty()) {
								for (int n = 0; n < nSize; ++n) {
									const Color& viewColor = sortedViews[n].second;
									float viewLuminance = MeshTexture::GetLuminance(viewColor);
									float luminanceDistance = cv::norm(avgLuminance - viewLuminance);
									if (luminanceDistance <= maxLuminanceDeviation){
										// validIndices.push_back(n);
									}
								}
							}
							if (validIndices.empty()) {
								for (int n = 0; n < nSize; ++n) {
									const Color& viewColor = sortedViews[n].second;
									float colorDistance = cv::norm(avgColor - viewColor);
									if (colorDistance<=maxColorDeviation){
										// validIndices.push_back(n);
									}
								}
							}
							//*/
							/* -- disabled fallback: looser color threshold --
							float maxColorDeviation2 = 0.05f;
							if (validIndices.empty()) {
								for (int n = 0; n < nSize; ++n) {
									const Color& viewColor = sortedViews[n].second;
									float colorDistance = cv::norm(avgColor - viewColor);
									if (colorDistance <= maxColorDeviation2) {
										validIndices.push_back(n);
									}
								}
							}
							//*/
							/* -- disabled fallback: luminance-distance filtering --
							float totalLuminance = MeshTexture::GetLuminance(totalColor);
							float avgLuminance = totalLuminance / nSize;
							for (int n = 0; n < nSize; ++n) {
								const Color& viewColor = sortedViews[n].second;
								float viewLuminance = MeshTexture::GetLuminance(viewColor);
								float luminanceDistance = cv::norm(avgLuminance - viewLuminance);
								if (luminanceDistance <= maxLuminanceDeviation) {
									validIndices.push_back(n);
								}
							}
							//*/
							// if all views were excluded, keep the original average
							if (validIndices.empty()) {
								// virtualFaceData.quality = avgQuality;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
								// virtualFaceData.color = avgColor;
#endif
								// virtualFaceData.quality = avgQuality;
								// virtualFaceData.color = sortedLuminViews[0].second;
								virtualFaceData.quality = medianQuality;
								virtualFaceData.color = medianColor;
							} else {
								// recompute the average using only the filtered views, weighted by color distance
								float totalQuality2 = 0.0f;
								Color totalColor2 = Color(0,0,0);
								for (int idx : validIndices) {
									const Color& viewColor = sortedViews[idx].second;
									float colorDistance = cv::norm(avgColor - viewColor);
									float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation);
									totalQuality2 += sortedViews[idx].first;
									totalColor2 += sortedViews[idx].second * weight;
								}
								virtualFaceData.quality = totalQuality2 / validIndices.size();
								virtualFaceData.color = totalColor2 / validIndices.size();
							}
							//*/
				} else if (validViewsSize>0&&validViewsSize<=2&&false) { // NOTE(review): '&& false' makes this branch dead code
					/* -- disabled: average only the valid (non-invalid-relative) views --
					virtualFaceData.quality = 0;
					virtualFaceData.color = Point3f::ZERO;
					// int nSize = (validViews.size()>1) ? 1 : validViews.size();
					int nSize = validViews.size();
					// NOTE(review): garbled residue below — an angle-bracketed span was stripped here as well
					for (int n=0; n 3) ? 3 : validViews.size();
					// compute initial averages
					float totalQuality2 = 0.0f;
					Color totalColor2(0,0,0);
					for (int n = 0; n < nSize; ++n) {
						totalQuality2 += validViews[n].first;
						totalColor2 += validViews[n].second;
					}
					const float avgQuality2 = totalQuality2 / nSize;
					const Color avgColor2 = totalColor2 / nSize;
					// filter out views with excessive deviation
					// std::vector validIndices;
					float maxColorDeviation = 0.01f; // color deviation threshold
					for (int n = 0; n < nSize; ++n) {
						const Color& viewColor = validViews[n].second;
						float colorDistance = cv::norm(avgColor2 - viewColor);
						// printf("colorDistance=%f\n", colorDistance);
						float viewLuminance = MeshTexture::GetLuminance(viewColor);
						if ((colorDistance<=maxColorDeviation)&&
							(viewLuminance<=120.0f)){
						// if (colorDistance <= maxColorDeviation) {
							// validIndices.push_back(n);
						}
					}
					/*
					// float totalLuminance = MeshTexture::GetLuminance(totalColor);
					// float avgLuminance = totalLuminance / nSize;
					float maxLuminanceDeviation = 0.01f;
					for (int n = 0; n < nSize; ++n) {
						const Color& viewColor = sortedViews[n].second;
						float viewLuminance = MeshTexture::GetLuminance(viewColor);
						float luminanceDistance = cv::norm(avgLuminance - viewLuminance);
						// printf("luminanceDistance=%f\n", luminanceDistance);
						if (luminanceDistance <= maxLuminanceDeviation) {
							// validIndices.push_back(n);
						}
					}
					//*/
					// if all views were excluded, keep the original average
					if (validIndices.empty()) {
						// virtualFaceData.quality = avgQuality;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
						// virtualFaceData.color = avgColor;
#endif
						virtualFaceData.quality = medianQuality;
						virtualFaceData.color = medianColor;
						// virtualFaceData.color = sortedLuminViews[0].second;
						/*
						for (int n = 0; n < nSize; ++n) {
							float lumin = sortedLuminViews[n].first;
							if (lumin>=minLuminance&&lumin<=maxLuminance) {
								// virtualFaceData.quality = avgQuality;
								// virtualFaceData.color = sortedLuminViews[0].second;
								break;
							}
						}
						//*/
					} else {
						// recompute the average using only the filtered views, weighted by color distance
						float totalQuality2 = 0.0f;
						Color totalColor2 = Color(0,0,0);
						for (int idx : validIndices) {
							const Color& viewColor = sortedViews[idx].second;
							float colorDistance = cv::norm(avgColor - viewColor);
							float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation);
							totalQuality2 += validViews[idx].first;
							totalColor2 += validViews[idx].second * weight;
						}
						virtualFaceData.quality = totalQuality2 / validIndices.size();
						virtualFaceData.color = totalColor2 / validIndices.size();
					}
					//*/
				} else {
					//*
					ASSERT(processedFaces > 0);
					// virtualFaceData.quality /= processedFaces;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
					// virtualFaceData.color /= processedFaces;
#endif
					virtualFaceData.quality = 0;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
					virtualFaceData.color = Point3f::ZERO;
#endif
					//*/
					/* -- disabled alternative: average accumulated per-face data --
					// if all views were excluded, keep the original average
					if (validIndices.empty() || validViews.size() <= 0) {
						// virtualFaceData.quality = avgQuality;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
						// virtualFaceData.color = avgColor;
#endif
						// virtualFaceData.quality = medianQuality;
						// virtualFaceData.color = medianColor;
						virtualFaceData.quality /= processedFaces;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
						virtualFaceData.color /= processedFaces;
#endif
					} else {
						// recompute the average using only the filtered views, weighted by color distance
						float totalQuality2 = 0.0f;
						Color totalColor2 = Color(0,0,0);
						for (int idx : validIndices) {
							const Color& viewColor = sortedViews[idx].second;
							float colorDistance = cv::norm(avgColor - viewColor);
							float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation);
							totalQuality2 += validViews[idx].first;
							totalColor2 += validViews[idx].second * weight;
						}
						virtualFaceData.quality = totalQuality2 / validIndices.size();
						virtualFaceData.color = totalColor2 / validIndices.size();
					}
					//*/
					}
				} else {
					// use robust statistics for the central color and luminance values
					const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color;
					const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality;
					const float medianLuminance = ComputeMedianLuminance(sortedViews);
					// compute the median absolute deviation (MAD) of color and luminance as deviation thresholds
					const float colorMAD = ComputeColorMAD(sortedViews, medianColor);
					const float luminanceMAD = ComputeLuminanceMAD(sortedViews, medianLuminance);
					// set dynamic thresholds from the MAD (3x MAD is the usual statistical outlier threshold)
					const float maxColorDeviation = 0.01f * colorMAD;
					const float maxLuminanceDeviation = 0.01f * luminanceMAD;
					std::vector validIndices;
					for (int n = 0; n < sortedViews.size(); ++n) {
						const Color& viewColor = sortedViews[n].second;
						const float viewLuminance = MeshTexture::GetLuminance(viewColor);
						const float colorDistance = cv::norm(viewColor - medianColor);
						const float luminanceDistance = std::abs(viewLuminance - medianLuminance);
						// if (colorDistance <= maxColorDeviation &&
						// luminanceDistance <= maxLuminanceDeviation)
						{
							validIndices.push_back(n);
						}
					}
					if (validIndices.empty()) {
						virtualFaceData.quality = medianQuality;
						virtualFaceData.color = medianColor;
					} else {
						// recompute the average using only the filtered views
						// NOTE(review): 'validIndices' indexes 'sortedViews' but is used on
						// 'validViews', which can be smaller — possible out-of-bounds; verify.
						float totalQuality2 = 0.0f;
						Color totalColor2 = Color(0,0,0);
						for (int idx : validIndices) {
							totalQuality2 += validViews[idx].first;
							totalColor2 += validViews[idx].second;
						}
						virtualFaceData.quality = totalQuality2 / validIndices.size();
						virtualFaceData.color = totalColor2 / validIndices.size();
					}
				}
				// virtualFaceData.bInvalidFacesRelative = (invalidCount > 1);
				// virtualFaceData.bInvalidFacesRelative = (invalidCount > processedFaces * 2 / 3);
			}
			ASSERT(!virtualFaceDatas.empty());
		}
		virtualFacesDatas.emplace_back(std::move(virtualFaceDatas));
		virtualFaces.emplace_back(std::move(virtualFace));
	} while (!remainingFaces.empty());
	return true;
}

/* void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras, float thMaxNormalDeviation) const { float thMaxColorDeviation = 0.000001f; if (meshCurvatures.empty()) { ComputeFaceCurvatures(); } const float ratioAngleToQuality(0.67f); const float cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation))); Mesh::FaceIdxArr remainingFaces(faces.size()); std::iota(remainingFaces.begin(), remainingFaces.end(), 0); std::vector selectedFaces(faces.size(), false); cQueue
currentVirtualFaceQueue; std::unordered_set queuedFaces; // Precompute average color for each face Colors faceColors; // 创建一个空列表 faceColors.reserve(faces.size()); // 预分配空间(如果cList有reserve方法且您关心性能) for (size_t i = 0; i < faces.size(); ++i) { faceColors.push_back(Color::ZERO); // 逐个添加元素 } for (FIndex idxFace = 0; idxFace < faces.size(); ++idxFace) { const FaceDataArr& faceDatas = facesDatas[idxFace]; if (faceDatas.empty()) continue; Color sumColor = Color::ZERO; for (const FaceData& fd : faceDatas) { sumColor += fd.color; } faceColors[idxFace] = sumColor / faceDatas.size(); } do { const FIndex startPos = RAND() % remainingFaces.size(); const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; // 动态法线阈值 const float centerCurvature = meshCurvatures[virtualFaceCenterFaceID]; const float dynamicThreshold = (centerCurvature < 0.2f) ? 15.0f : 8.0f; // 曲率<0.2为平坦区域 const float dynamicCosTh = COS(FD2R(dynamicThreshold)); ASSERT(currentVirtualFaceQueue.IsEmpty()); const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID]; // 检查中心面片是否包含无效视图 bool bHasInvalidView = false; int nInvalidViewCount = 0; int nTotalViewCount = 0; for (const FaceData& faceData : centerFaceDatas) { if (faceData.bInvalidFacesRelative) { bHasInvalidView = true; ++nInvalidViewCount; } ++nTotalViewCount; } std::vector> sortedViews; std::vector> sortedLuminViews; std::vector> validViews; sortedViews.reserve(centerFaceDatas.size()); for (const FaceData& fd : centerFaceDatas) { if (fd.bInvalidFacesRelative) { sortedViews.emplace_back(fd.quality, fd.color); sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color); } else { sortedViews.emplace_back(fd.quality, fd.color); sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color); validViews.emplace_back(fd.quality, fd.color); } } std::sort(sortedViews.begin(), sortedViews.end(), [](const auto& a, const auto& b) { return a.first 
> b.first; }); std::sort(validViews.begin(), validViews.end(), [](const auto& a, const auto& b) { return a.first > b.first; }); int nSize = sortedViews.size(); // 计算初始平均值 float totalQuality = 0.0f; Color totalColor(0,0,0); for (int n = 0; n < nSize; ++n) { totalQuality += sortedViews[n].first; totalColor += sortedViews[n].second; } const float avgQuality = totalQuality / nSize; const Color avgColor = totalColor / nSize; float totalLuminance = MeshTexture::GetLuminance(totalColor); float avgLuminance = totalLuminance / nSize; std::sort(sortedLuminViews.begin(), sortedLuminViews.end(), [avgLuminance](const auto& a, const auto& b) { float luminDistA = cv::norm(avgLuminance - a.first); float luminDistB = cv::norm(avgLuminance - b.first); return luminDistA < luminDistB; }); // select the common cameras Mesh::FaceIdxArr virtualFace; FaceDataArr virtualFaceDatas; if (centerFaceDatas.empty()) { virtualFace.emplace_back(virtualFaceCenterFaceID); selectedFaces[virtualFaceCenterFaceID] = true; const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID); ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); remainingFaces.RemoveAtMove(posToErase); } else { const IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); queuedFaces.clear(); do { const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); currentVirtualFaceQueue.PopHead(); // check for condition to add in current virtual face // normal angle smaller than thMaxNormalDeviation degrees const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); if (cosFaceToCenter < dynamicCosTh) // 使用动态阈值 continue; // check if current face is seen by all cameras in selectedCams ASSERT(!selectedCams.empty()); if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams)) continue; // Check color similarity const 
Color& centerColor = faceColors[virtualFaceCenterFaceID]; const Color& currentColor = faceColors[currentFaceId]; if (cv::norm(centerColor) > 1e-5 && cv::norm(currentColor) > 1e-5) { float colorDistance = cv::norm(centerColor - currentColor); if (colorDistance > thMaxColorDeviation) { continue; // Skip if color difference is too large } } // remove it from remaining faces and add it to the virtual face { const auto posToErase = remainingFaces.FindFirst(currentFaceId); ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); remainingFaces.RemoveAtMove(posToErase); selectedFaces[currentFaceId] = true; virtualFace.push_back(currentFaceId); } // add all new neighbors to the queue const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId]; for (int i = 0; i < 3; ++i) { const FIndex fIdx = ffaces[i]; if (fIdx == NO_ID) continue; if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) { currentVirtualFaceQueue.AddTail(fIdx); queuedFaces.emplace(fIdx); } } } while (!currentVirtualFaceQueue.IsEmpty()); // compute virtual face quality and create virtual face for (IIndex idxView: selectedCams) { FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); virtualFaceData.quality = 0; virtualFaceData.idxView = idxView; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color = Point3f::ZERO; #endif int invalidQuality = 0; Color invalidColor = Point3f::ZERO; unsigned processedFaces(0); bool bInvalidFacesRelative = false; int invalidCount = 0; for (FIndex fid : virtualFace) { const FaceDataArr& faceDatas = facesDatas[fid]; for (FaceData& faceData: faceDatas) { // 填充: 只处理当前视图的数据,累加质量和颜色 if (faceData.idxView == idxView) { virtualFaceData.quality += faceData.quality; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color += faceData.color; #endif processedFaces++; if (faceData.bInvalidFacesRelative) { invalidCount++; } break; // 每个面片每个视图只应有一个数据,找到后退出内层循环 } } } // 填充: 后处理,计算平均值和设置无效标志 if (processedFaces > 0) { virtualFaceData.quality /= 
processedFaces; #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA virtualFaceData.color /= processedFaces; #endif virtualFaceData.bInvalidFacesRelative = (invalidCount > processedFaces / 2); // 如果超过一半面片无效,则标记虚拟面无效 } else { // 如果没有找到任何数据,移除刚添加的virtualFaceData virtualFaceDatas.pop_back(); } } ASSERT(!virtualFaceDatas.empty()); } virtualFacesDatas.emplace_back(std::move(virtualFaceDatas)); virtualFaces.emplace_back(std::move(virtualFace)); } while (!remainingFaces.empty()); } */ #if TEXOPT_FACEOUTLIER == TEXOPT_FACEOUTLIER_MEDIAN // decrease the quality of / remove all views in which the face's projection // has a much different color than in the majority of views bool MeshTexture::FaceOutlierDetection(FaceDataArr& faceDatas, float thOutlier) const { // consider as outlier if the absolute difference to the median is outside this threshold if (thOutlier <= 0) thOutlier = 0.15f*255.f; // init colors array if (faceDatas.size() <= 3) return false; FloatArr channels[3]; for (int c=0; c<3; ++c) channels[c].resize(faceDatas.size()); FOREACH(i, faceDatas) { const Color& color = faceDatas[i].color; for (int c=0; c<3; ++c) channels[c][i] = color[c]; } // find median for (int c=0; c<3; ++c) channels[c].Sort(); const unsigned idxMedian(faceDatas.size() >> 1); Color median; for (int c=0; c<3; ++c) median[c] = channels[c][idxMedian]; // abort if there are not at least 3 inliers int nInliers(0); BoolArr inliers(faceDatas.size()); FOREACH(i, faceDatas) { const Color& color = faceDatas[i].color; for (int c=0; c<3; ++c) { if (ABS(median[c]-color[c]) > thOutlier) { inliers[i] = false; goto CONTINUE_LOOP; } } inliers[i] = true; ++nInliers; CONTINUE_LOOP:; } if (nInliers == faceDatas.size()) return true; if (nInliers < 3) return false; // remove outliers RFOREACH(i, faceDatas) if (!inliers[i]) faceDatas.RemoveAt(i); return true; } #elif TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA // A multi-variate normal distribution which is NOT normalized such that the integral is 1 // - centered is the 
vector for which the function is to be evaluated with the mean subtracted [Nx1] // - X is the vector for which the function is to be evaluated [Nx1] // - mu is the mean around which the distribution is centered [Nx1] // - covarianceInv is the inverse of the covariance matrix [NxN] // return exp(-1/2 * (X-mu)^T * covariance_inv * (X-mu)) template inline T MultiGaussUnnormalized(const Eigen::Matrix& centered, const Eigen::Matrix& covarianceInv) { return EXP(T(-0.5) * T(centered.adjoint() * covarianceInv * centered)); } template inline T MultiGaussUnnormalized(const Eigen::Matrix& X, const Eigen::Matrix& mu, const Eigen::Matrix& covarianceInv) { return MultiGaussUnnormalized(X - mu, covarianceInv); } // decrease the quality of / remove all views in which the face's projection // has a much different color than in the majority of views bool MeshTexture::FaceOutlierDetection(FaceDataArr& faceDatas, float thOutlier) const { // reject all views whose gauss value is below this threshold if (thOutlier <= 0) thOutlier = 6e-2f; const float minCovariance(1e-3f); // if all covariances drop below this the outlier detection aborted const unsigned maxIterations(10); const unsigned minInliers(4); // init colors array if (faceDatas.size() <= minInliers) return false; Eigen::Matrix3Xd colorsAll(3, faceDatas.size()); BoolArr inliers(faceDatas.size()); FOREACH(i, faceDatas) { colorsAll.col(i) = ((const Color::EVec)faceDatas[i].color).cast(); inliers[i] = true; } // perform outlier removal; abort if something goes wrong // (number of inliers below threshold or can not invert the covariance) size_t numInliers(faceDatas.size()); Eigen::Vector3d mean; Eigen::Matrix3d covariance; Eigen::Matrix3d covarianceInv; for (unsigned iter = 0; iter < maxIterations; ++iter) { // compute the mean color and color covariance only for inliers const Eigen::Block colors(colorsAll.leftCols(numInliers)); mean = colors.rowwise().mean(); const Eigen::Matrix3Xd centered(colors.colwise() - mean); covariance = 
(centered * centered.transpose()) / double(colors.cols() - 1); // stop if all covariances gets very small if (covariance.array().abs().maxCoeff() < minCovariance) { // remove the outliers RFOREACH(i, faceDatas) if (!inliers[i]) faceDatas.RemoveAt(i); return true; } // invert the covariance matrix // (FullPivLU not the fastest, but gives feedback about numerical stability during inversion) const Eigen::FullPivLU lu(covariance); if (!lu.isInvertible()) return false; covarianceInv = lu.inverse(); // filter inliers // (all views with a gauss value above the threshold) numInliers = 0; bool bChanged(false); FOREACH(i, faceDatas) { const Eigen::Vector3d color(((const Color::EVec)faceDatas[i].color).cast()); const double gaussValue(MultiGaussUnnormalized(color, mean, covarianceInv)); bool& inlier = inliers[i]; if (gaussValue > thOutlier) { // set as inlier colorsAll.col(numInliers++) = color; if (inlier != true) { inlier = true; bChanged = true; } } else { // set as outlier if (inlier != false) { inlier = false; bChanged = true; } } } if (numInliers == faceDatas.size()) return true; if (numInliers < minInliers) return false; if (!bChanged) break; } #if TEXOPT_FACEOUTLIER == TEXOPT_FACEOUTLIER_GAUSS_DAMPING // select the final inliers const float factorOutlierRemoval(0.2f); covarianceInv *= factorOutlierRemoval; RFOREACH(i, faceDatas) { const Eigen::Vector3d color(((const Color::EVec)faceDatas[i].color).cast()); const double gaussValue(MultiGaussUnnormalized(color, mean, covarianceInv)); ASSERT(gaussValue >= 0 && gaussValue <= 1); faceDatas[i].quality *= gaussValue; } #endif #if TEXOPT_FACEOUTLIER == TEXOPT_FACEOUTLIER_GAUSS_CLAMPING // remove outliers RFOREACH(i, faceDatas) if (!inliers[i]) faceDatas.RemoveAt(i); #endif return true; } #endif bool MeshTexture::FaceViewSelection( unsigned minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, int nIgnoreMaskLabel, const IIndexArr& views) { // extract array of triangles incident to each vertex 
ListVertexFaces(); // create texture patches { // compute face normals and smoothen them scene.mesh.SmoothNormalFaces(); const bool bUseVirtualFaces(minCommonCameras > 0); // list all views for each face FaceDataViewArr facesDatas; if (!ListCameraFaces(facesDatas, fOutlierThreshold, nIgnoreMaskLabel, views, bUseVirtualFaces)) return false; // create faces graph typedef boost::adjacency_list Graph; typedef boost::graph_traits::edge_iterator EdgeIter; typedef boost::graph_traits::out_edge_iterator EdgeOutIter; Graph graph; LabelArr labels; // construct and use virtual faces for patch creation instead of actual mesh faces; // the virtual faces are composed of coplanar triangles sharing same views if (bUseVirtualFaces) { // 1) create FaceToVirtualFaceMap FaceDataViewArr virtualFacesDatas; VirtualFaceIdxsArr virtualFaces; // stores each virtual face as an array of mesh face ID CreateVirtualFaces(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); Mesh::FaceIdxArr mapFaceToVirtualFace(faces.size()); // for each mesh face ID, store the virtual face ID witch contains it size_t controlCounter(0); FOREACH(idxVF, virtualFaces) { const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; for (FIndex idxFace : vf) { mapFaceToVirtualFace[idxFace] = idxVF; ++controlCounter; } } ASSERT(controlCounter == faces.size()); // 2) create function to find virtual faces neighbors VirtualFaceIdxsArr virtualFaceNeighbors; { // for each virtual face, the list of virtual faces with at least one vertex in common virtualFaceNeighbors.resize(virtualFaces.size()); FOREACH(idxVF, virtualFaces) { const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; Mesh::FaceIdxArr& vfNeighbors = virtualFaceNeighbors[idxVF]; for (FIndex idxFace : vf) { const Mesh::FaceFaces& adjFaces = faceFaces[idxFace]; for (int i = 0; i < 3; ++i) { const FIndex fAdj(adjFaces[i]); if (fAdj == NO_ID) continue; if (mapFaceToVirtualFace[fAdj] == idxVF) continue; if (fAdj != idxFace && vfNeighbors.Find(mapFaceToVirtualFace[fAdj]) == 
Mesh::FaceIdxArr::NO_INDEX) { vfNeighbors.emplace_back(mapFaceToVirtualFace[fAdj]); } } } } } // 3) use virtual faces to build the graph // 4) assign images to virtual faces // 5) spread image ID to each mesh face from virtual face FOREACH(idxFace, virtualFaces) { MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); ASSERT(idx == idxFace); } FOREACH(idxVirtualFace, virtualFaces) { const Mesh::FaceIdxArr& afaces = virtualFaceNeighbors[idxVirtualFace]; for (FIndex idxVirtualFaceAdj: afaces) { if (idxVirtualFace >= idxVirtualFaceAdj) continue; const bool bInvisibleFace(virtualFacesDatas[idxVirtualFace].empty()); const bool bInvisibleFaceAdj(virtualFacesDatas[idxVirtualFaceAdj].empty()); if (bInvisibleFace || bInvisibleFaceAdj) continue; boost::add_edge(idxVirtualFace, idxVirtualFaceAdj, graph); } } ASSERT((Mesh::FIndex)boost::num_vertices(graph) == virtualFaces.size()); // assign the best view to each face labels.resize(faces.size()); { // normalize quality values float maxQuality(0); for (const FaceDataArr& faceDatas: virtualFacesDatas) { for (const FaceData& faceData: faceDatas) if (maxQuality < faceData.quality) maxQuality = faceData.quality; } Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); for (const FaceDataArr& faceDatas: virtualFacesDatas) { for (const FaceData& faceData: faceDatas) hist.Add(faceData.quality); } const float normQuality(hist.GetApproximatePermille(0.95f)); #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP // initialize inference structures const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); LBPInference inference; { inference.SetNumNodes(virtualFaces.size()); inference.SetSmoothCost(SmoothnessPotts); EdgeOutIter ei, eie; FOREACH(f, virtualFaces) { for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { ASSERT(f == (FIndex)ei->m_source); const FIndex fAdj((FIndex)ei->m_target); if (f < fAdj) // add edges only once inference.SetNeighbors(f, 
fAdj); } // set costs for label 0 (undefined) inference.SetDataCost((Label)0, f, MaxEnergy); } } //* // set data costs for all labels (except label 0 - undefined) FOREACH(f, virtualFacesDatas) { const FaceDataArr& faceDatas = virtualFacesDatas[f]; for (const FaceData& faceData: faceDatas) { const Label label((Label)faceData.idxView+1); const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); const float dataCost((1.f-normalizedQuality)*MaxEnergy); inference.SetDataCost(label, f, dataCost); } } //*/ /* FOREACH(f, virtualFacesDatas) { const FaceDataArr& faceDatas = virtualFacesDatas[f]; const size_t numViews = faceDatas.size(); const unsigned minSingleView = 2; // 当可用视角<=2时强制单视图 // 当视角不足时,只保留最佳视角 // if (numViews <= minSingleView) { if (true) { // 找到质量最高的视角 float maxQuality = 0; IIndex bestView = NO_ID; for (const FaceData& fd : faceDatas) { if (fd.quality > maxQuality) { maxQuality = fd.quality; bestView = fd.idxView; } } // 只设置最佳视角的数据项,其他设为MaxEnergy for (const FaceData& fd : faceDatas) { const Label label = (Label)fd.idxView + 1; // const float cost = (fd.idxView == bestView) ? // (1.f - fd.quality/normQuality) * MaxEnergy : // MaxEnergy; const float cost = (fd.idxView == bestView) ? (1.f - fd.quality/normQuality) * MaxEnergy : MaxEnergy; inference.SetDataCost(label, f, cost); } } else { for (const FaceData& faceData: faceDatas) { const Label label((Label)faceData.idxView+1); const float normalizedQuality(faceData.quality>=normQuality ? 
1.f : faceData.quality/normQuality); // const float normalizedQuality = faceData.quality/normQuality; const float dataCost((1.f-normalizedQuality)*MaxEnergy); inference.SetDataCost(label, f, dataCost); } } } //*/ // assign the optimal view (label) to each face // (label 0 is reserved as undefined) inference.Optimize(); // extract resulting labeling LabelArr virtualLabels(virtualFaces.size()); virtualLabels.Memset(0xFF); FOREACH(l, virtualLabels) { const Label label(inference.GetLabel(l)); ASSERT(label < images.size()+1); if (label > 0) virtualLabels[l] = label-1; } FOREACH(l, labels) { labels[l] = virtualLabels[mapFaceToVirtualFace[l]]; } #endif } graph.clear(); /* // 标记虚拟面边界为接缝 FOREACH(idxVF, virtualFaces) { const auto& vf = virtualFaces[idxVF]; for (FIndex fid : vf) { const auto& adjFaces = faceFaces[fid]; for (int i=0; i<3; ++i) { if (adjFaces[i] == NO_ID) continue; const FIndex adjVF = mapFaceToVirtualFace[adjFaces[i]]; if (adjVF != idxVF) { seamEdges.emplace_back(fid, adjFaces[i]); } } } } //*/ } /* #if TEXOPT_USE_ANISOTROPIC const int anisoLevel = 8; // 设置各向异性过滤级别 for (auto& tex : textures) { tex.SetFilterMode(Texture::ANISOTROPIC); tex.SetAnisotropy(anisoLevel); } #endif //*/ // create the graph of faces: each vertex is a face and the edges are the edges shared by the faces FOREACH(idxFace, faces) { MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); ASSERT(idx == idxFace); } FOREACH(idxFace, faces) { const Mesh::FaceFaces& afaces = faceFaces[idxFace]; for (int v=0; v<3; ++v) { const FIndex idxFaceAdj = afaces[v]; if (idxFaceAdj == NO_ID || idxFace >= idxFaceAdj) continue; const bool bInvisibleFace(facesDatas[idxFace].empty()); const bool bInvisibleFaceAdj(facesDatas[idxFaceAdj].empty()); if (bInvisibleFace || bInvisibleFaceAdj) { if (bInvisibleFace != bInvisibleFaceAdj) seamEdges.emplace_back(idxFace, idxFaceAdj); continue; } boost::add_edge(idxFace, idxFaceAdj, graph); } } faceFaces.Release(); 
ASSERT((Mesh::FIndex)boost::num_vertices(graph) == faces.size()); // LOG_OUT() << "bUseVirtualFaces=" << bUseVirtualFaces << std::endl; // start patch creation starting directly from individual faces if (!bUseVirtualFaces) { // assign the best view to each face labels.resize(faces.size()); { // normalize quality values float maxQuality(0); for (const FaceDataArr& faceDatas: facesDatas) { for (const FaceData& faceData: faceDatas) if (maxQuality < faceData.quality) maxQuality = faceData.quality; } Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); for (const FaceDataArr& faceDatas: facesDatas) { for (const FaceData& faceData: faceDatas) hist.Add(faceData.quality); } const float normQuality(hist.GetApproximatePermille(0.95f)); #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP // initialize inference structures const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); LBPInference inference; { inference.SetNumNodes(faces.size()); inference.SetSmoothCost(SmoothnessPotts); EdgeOutIter ei, eie; FOREACH(f, faces) { for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { ASSERT(f == (FIndex)ei->m_source); const FIndex fAdj((FIndex)ei->m_target); if (f < fAdj) // add edges only once inference.SetNeighbors(f, fAdj); } // set costs for label 0 (undefined) inference.SetDataCost((Label)0, f, MaxEnergy); } } //* // set data costs for all labels (except label 0 - undefined) FOREACH(f, facesDatas) { const FaceDataArr& faceDatas = facesDatas[f]; const size_t numViews = faceDatas.size(); unsigned minViews=3; float dataWeightFactor=2.0f; // LOG_OUT() << "FaceViewSelection numViews=" << numViews << std::endl; const float factor = (numViews < minViews) ? dataWeightFactor : 1.0f; for (const FaceData& faceData: faceDatas) { const Label label((Label)faceData.idxView+1); const float normalizedQuality(faceData.quality>=normQuality ? 
1.f : faceData.quality/normQuality); // const float dataCost((1.f-normalizedQuality)*MaxEnergy * factor); const float dataCost((1.f-normalizedQuality)*MaxEnergy); inference.SetDataCost(label, f, dataCost); } } //*/ /* FOREACH(f, facesDatas) { const FaceDataArr& faceDatas = facesDatas[f]; const size_t numViews = faceDatas.size(); const unsigned minSingleView = 2; // 当可用视角<=5时强制单视图 // 当视角不足时,只保留最佳视角 // if (numViews <= minSingleView) { if (true) { // 找到质量最高的视角 float maxQuality = 0; IIndex bestView = NO_ID; for (const FaceData& fd : faceDatas) { if (fd.quality > maxQuality) { maxQuality = fd.quality; bestView = fd.idxView; } } // 只设置最佳视角的数据项,其他设为MaxEnergy for (const FaceData& fd : faceDatas) { const Label label = (Label)fd.idxView + 1; // const float cost = (fd.idxView == bestView) ? // (1.f - fd.quality/normQuality) * MaxEnergy : // MaxEnergy; const float cost = (fd.idxView == bestView) ? (1.f - fd.quality/normQuality) * MaxEnergy : 0; inference.SetDataCost(label, f, cost); } } else { // 正常处理多视角情况 for (const FaceData& faceData : faceDatas) { const Label label = (Label)faceData.idxView + 1; const float normalizedQuality = faceData.quality/normQuality; const float dataCost = (1.f - normalizedQuality) * MaxEnergy; inference.SetDataCost(label, f, dataCost); } } } //*/ // assign the optimal view (label) to each face // (label 0 is reserved as undefined) inference.Optimize(); // extract resulting labeling labels.Memset(0xFF); FOREACH(l, labels) { const Label label(inference.GetLabel(l)); ASSERT(label < images.size()+1); if (label > 0) labels[l] = label-1; } #endif #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_TRWS // find connected components ASSERT((FIndex)boost::num_vertices(graph) == faces.size()); components.resize(faces.size()); const FIndex nComponents(boost::connected_components(graph, components.data())); // map face ID from global to component space typedef cList NodeIDs; NodeIDs nodeIDs(faces.size()); NodeIDs sizes(nComponents); sizes.Memset(0); FOREACH(c, components) 
nodeIDs[c] = sizes[components[c]]++; // initialize inference structures const LabelID numLabels(images.size()+1); CLISTDEFIDX(TRWSInference, FIndex) inferences(nComponents); FOREACH(s, sizes) { const NodeID numNodes(sizes[s]); ASSERT(numNodes > 0); if (numNodes <= 1) continue; TRWSInference& inference = inferences[s]; inference.Init(numNodes, numLabels); } // set data costs { // add nodes CLISTDEF0(EnergyType) D(numLabels); FOREACH(f, facesDatas) { TRWSInference& inference = inferences[components[f]]; if (inference.IsEmpty()) continue; D.MemsetValue(MaxEnergy); const FaceDataArr& faceDatas = facesDatas[f]; for (const FaceData& faceData: faceDatas) { const Label label((Label)faceData.idxView); const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); const EnergyType dataCost(MaxEnergy*(1.f-normalizedQuality)); D[label] = dataCost; } const NodeID nodeID(nodeIDs[f]); inference.AddNode(nodeID, D.Begin()); } // add edges EdgeOutIter ei, eie; FOREACH(f, faces) { TRWSInference& inference = inferences[components[f]]; if (inference.IsEmpty()) continue; for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { ASSERT(f == (FIndex)ei->m_source); const FIndex fAdj((FIndex)ei->m_target); ASSERT(components[f] == components[fAdj]); if (f < fAdj) // add edges only once inference.AddEdge(nodeIDs[f], nodeIDs[fAdj]); } } } // assign the optimal view (label) to each face #ifdef TEXOPT_USE_OPENMP #pragma omp parallel for schedule(dynamic) for (int i=0; i<(int)inferences.size(); ++i) { #else FOREACH(i, inferences) { #endif TRWSInference& inference = inferences[i]; if (inference.IsEmpty()) continue; inference.Optimize(); } // extract resulting labeling labels.Memset(0xFF); FOREACH(l, labels) { TRWSInference& inference = inferences[components[l]]; if (inference.IsEmpty()) continue; const Label label(inference.GetLabel(nodeIDs[l])); ASSERT(label >= 0 && label < numLabels); if (label < images.size()) labels[l] = label; } #endif } } 
// create texture patches { // divide graph in sub-graphs of connected faces having the same label EdgeIter ei, eie; const PairIdxArr::IDX startLabelSeamEdges(seamEdges.size()); for (boost::tie(ei, eie) = boost::edges(graph); ei != eie; ++ei) { const FIndex fSource((FIndex)ei->m_source); const FIndex fTarget((FIndex)ei->m_target); ASSERT(components.empty() || components[fSource] == components[fTarget]); if (labels[fSource] != labels[fTarget]) seamEdges.emplace_back(fSource, fTarget); } for (const PairIdx *pEdge=seamEdges.Begin()+startLabelSeamEdges, *pEdgeEnd=seamEdges.End(); pEdge!=pEdgeEnd; ++pEdge) boost::remove_edge(pEdge->i, pEdge->j, graph); // find connected components: texture patches ASSERT((FIndex)boost::num_vertices(graph) == faces.size()); components.resize(faces.size()); const FIndex nComponents(boost::connected_components(graph, components.data())); // create texture patches; // last texture patch contains all faces with no texture LabelArr sizes(nComponents); sizes.Memset(0); FOREACH(c, components) ++sizes[components[c]]; texturePatches.resize(nComponents+1); texturePatches.back().label = NO_ID; FOREACH(f, faces) { const Label label(labels[f]); const FIndex c(components[f]); TexturePatch& texturePatch = texturePatches[c]; ASSERT(texturePatch.label == label || texturePatch.faces.empty()); if (label == NO_ID) { texturePatch.label = NO_ID; texturePatches.back().faces.Insert(f); } else { if (texturePatch.faces.empty()) { texturePatch.label = label; texturePatch.faces.reserve(sizes[c]); } texturePatch.faces.Insert(f); } } // remove all patches with invalid label (except the last one) // and create the map from the old index to the new one mapIdxPatch.resize(nComponents); std::iota(mapIdxPatch.Begin(), mapIdxPatch.End(), 0); for (FIndex t = nComponents; t-- > 0; ) { if (texturePatches[t].label == NO_ID) { texturePatches.RemoveAtMove(t); mapIdxPatch.RemoveAtMove(t); } } const unsigned numPatches(texturePatches.size()-1); uint32_t idxPatch(0); for 
(IndexArr::IDX i=0; i 0); // list all views for each face FaceDataViewArr facesDatas; if (!ListCameraFaces(facesDatas, fOutlierThreshold, nIgnoreMaskLabel, views, bUseVirtualFaces)) return false; // create faces graph typedef boost::adjacency_list Graph; typedef boost::graph_traits::edge_iterator EdgeIter; typedef boost::graph_traits::out_edge_iterator EdgeOutIter; Graph graph; LabelArr labels; // construct and use virtual faces for patch creation instead of actual mesh faces; // the virtual faces are composed of coplanar triangles sharing same views if (bUseVirtualFaces) { // 1) create FaceToVirtualFaceMap FaceDataViewArr virtualFacesDatas; VirtualFaceIdxsArr virtualFaces; // stores each virtual face as an array of mesh face ID CreateAdaptiveVirtualFaces(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); // CreateVirtualFaces(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); Mesh::FaceIdxArr mapFaceToVirtualFace(faces.size()); // for each mesh face ID, store the virtual face ID witch contains it size_t controlCounter(0); FOREACH(idxVF, virtualFaces) { const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; for (FIndex idxFace : vf) { mapFaceToVirtualFace[idxFace] = idxVF; ++controlCounter; } } ASSERT(controlCounter == faces.size()); // 2) create function to find virtual faces neighbors VirtualFaceIdxsArr virtualFaceNeighbors; { virtualFaceNeighbors.resize(virtualFaces.size()); FOREACH(idxVF, virtualFaces) { const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; Mesh::FaceIdxArr& vfNeighbors = virtualFaceNeighbors[idxVF]; printf("Processing virtual face %zu/%zu\n", idxVF, virtualFaces.size()); for (FIndex idxFace : vf) { const Mesh::FaceFaces& adjFaces = faceFaces[idxFace]; for (int i = 0; i < 3; ++i) { const FIndex fAdj(adjFaces[i]); // 关键修复:添加边界检查 if (fAdj == NO_ID) continue; if (fAdj >= mapFaceToVirtualFace.size()) { continue; } if (mapFaceToVirtualFace[fAdj] == idxVF) continue; if (fAdj != idxFace && vfNeighbors.Find(mapFaceToVirtualFace[fAdj]) 
== Mesh::FaceIdxArr::NO_INDEX) { vfNeighbors.emplace_back(mapFaceToVirtualFace[fAdj]); } } } } } // 3) use virtual faces to build the graph FOREACH(idxFace, virtualFaces) { MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); ASSERT(idx == idxFace); } FOREACH(idxVirtualFace, virtualFaces) { const Mesh::FaceIdxArr& afaces = virtualFaceNeighbors[idxVirtualFace]; for (FIndex idxVirtualFaceAdj: afaces) { if (idxVirtualFace >= idxVirtualFaceAdj) continue; // 关键修复:添加有效性检查 if (idxVirtualFace >= virtualFacesDatas.size() || idxVirtualFaceAdj >= virtualFacesDatas.size()) { printf("Skipping invalid virtual face pair: %u, %u\n", idxVirtualFace, idxVirtualFaceAdj); continue; } const bool bInvisibleFace(virtualFacesDatas[idxVirtualFace].empty()); const bool bInvisibleFaceAdj(virtualFacesDatas[idxVirtualFaceAdj].empty()); if (bInvisibleFace || bInvisibleFaceAdj) continue; boost::add_edge(idxVirtualFace, idxVirtualFaceAdj, graph); } } ASSERT((Mesh::FIndex)boost::num_vertices(graph) == virtualFaces.size()); // assign the best view to each face labels.resize(faces.size()); { // normalize quality values float maxQuality(0); for (const FaceDataArr& faceDatas: virtualFacesDatas) { for (const FaceData& faceData: faceDatas) if (maxQuality < faceData.quality) maxQuality = faceData.quality; } Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); for (const FaceDataArr& faceDatas: virtualFacesDatas) { for (const FaceData& faceData: faceDatas) hist.Add(faceData.quality); } // const float normQuality(hist.GetApproximatePermille(0.95f)); const float normQuality(hist.GetApproximatePermille(0.8f)); #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP // initialize inference structures const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); LBPInference inference; { inference.SetNumNodes(virtualFaces.size()); inference.SetSmoothCost(SmoothnessPotts); EdgeOutIter ei, eie; FOREACH(f, virtualFaces) { for (boost::tie(ei, 
eie) = boost::out_edges(f, graph); ei != eie; ++ei) { ASSERT(f == (FIndex)ei->m_source); const FIndex fAdj((FIndex)ei->m_target); if (f < fAdj) // add edges only once inference.SetNeighbors(f, fAdj); } // set costs for label 0 (undefined) inference.SetDataCost((Label)0, f, MaxEnergy); // inference.SetDataCost((Label)0, f, 0); } } FOREACH(f, virtualFacesDatas) { const FaceDataArr& faceDatas = virtualFacesDatas[f]; const size_t numViews = faceDatas.size(); const unsigned minSingleView = 2; // 当可用视角<=2时强制单视图 // 当视角不足时,只保留最佳视角 if (numViews <= minSingleView) { // if (true) { // 找到质量最高的视角 float maxQuality = 0; IIndex bestView = NO_ID; for (const FaceData& fd : faceDatas) { if (fd.quality > maxQuality) { maxQuality = fd.quality; bestView = fd.idxView; } } // 只设置最佳视角的数据项,其他设为MaxEnergy for (const FaceData& fd : faceDatas) { const Label label = (Label)fd.idxView + 1; // const float cost = (fd.idxView == bestView) ? // (1.f - fd.quality/normQuality) * MaxEnergy : // MaxEnergy; const float cost = (fd.idxView == bestView) ? (1.f - fd.quality/normQuality) * MaxEnergy : 0; inference.SetDataCost(label, f, cost); } } else { for (const FaceData& faceData: faceDatas) { const Label label((Label)faceData.idxView+1); const float cost = (faceData.quality>=normQuality) ? 
(1.f - faceData.quality/normQuality) * MaxEnergy : 0; inference.SetDataCost(label, f, cost); } } } // assign the optimal view (label) to each face // (label 0 is reserved as undefined) inference.Optimize(); // extract resulting labeling LabelArr virtualLabels(virtualFaces.size()); virtualLabels.Memset(0xFF); FOREACH(l, virtualLabels) { const Label label(inference.GetLabel(l)); ASSERT(label < images.size()+1); if (label > 0) virtualLabels[l] = label-1; } // 在标签传播部分添加边界检查 FOREACH(l, labels) { const size_t virtualIdx = mapFaceToVirtualFace[l]; if (virtualIdx < virtualLabels.size()) { labels[l] = virtualLabels[virtualIdx]; } else { // 处理无效索引情况 labels[l] = NO_ID; // printf("警告:虚拟面索引 %zu 超出范围 (最大 %zu)\n", // virtualIdx, virtualLabels.size()-1); } } // 在添加边的部分添加有效性检查 FOREACH(idxVirtualFace, virtualFaces) { const Mesh::FaceIdxArr& afaces = virtualFaceNeighbors[idxVirtualFace]; for (FIndex idxVirtualFaceAdj: afaces) { if (idxVirtualFace >= idxVirtualFaceAdj) continue; // 添加有效性检查 if (idxVirtualFace >= virtualFaces.size() || idxVirtualFaceAdj >= virtualFaces.size()) { // printf("跳过无效虚拟面对:%u, %u (最大 %zu)\n", // idxVirtualFace, idxVirtualFaceAdj, virtualFaces.size()-1); continue; } const bool bInvisibleFace(virtualFacesDatas[idxVirtualFace].empty()); const bool bInvisibleFaceAdj(virtualFacesDatas[idxVirtualFaceAdj].empty()); if (bInvisibleFace || bInvisibleFaceAdj) continue; boost::add_edge(idxVirtualFace, idxVirtualFaceAdj, graph); } } #endif } graph.clear(); } // create the graph of faces: each vertex is a face and the edges are the edges shared by the faces FOREACH(idxFace, faces) { MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); ASSERT(idx == idxFace); } FOREACH(idxFace, faces) { const Mesh::FaceFaces& afaces = faceFaces[idxFace]; for (int v=0; v<3; ++v) { const FIndex idxFaceAdj = afaces[v]; if (idxFaceAdj == NO_ID || idxFace >= idxFaceAdj) continue; const bool bInvisibleFace(facesDatas[idxFace].empty()); const bool 
bInvisibleFaceAdj(facesDatas[idxFaceAdj].empty()); if (bInvisibleFace || bInvisibleFaceAdj) { if (bInvisibleFace != bInvisibleFaceAdj) seamEdges.emplace_back(idxFace, idxFaceAdj); continue; } boost::add_edge(idxFace, idxFaceAdj, graph); } } faceFaces.Release(); ASSERT((Mesh::FIndex)boost::num_vertices(graph) == faces.size()); // LOG_OUT() << "bUseVirtualFaces=" << bUseVirtualFaces << std::endl; // start patch creation starting directly from individual faces if (!bUseVirtualFaces) { // assign the best view to each face labels.resize(faces.size()); { // normalize quality values float maxQuality(0); for (const FaceDataArr& faceDatas: facesDatas) { for (const FaceData& faceData: faceDatas) if (maxQuality < faceData.quality) maxQuality = faceData.quality; } Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); for (const FaceDataArr& faceDatas: facesDatas) { for (const FaceData& faceData: faceDatas) hist.Add(faceData.quality); } const float normQuality(hist.GetApproximatePermille(0.95f)); #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP // initialize inference structures const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); LBPInference inference; { inference.SetNumNodes(faces.size()); inference.SetSmoothCost(SmoothnessPotts); EdgeOutIter ei, eie; FOREACH(f, faces) { for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { ASSERT(f == (FIndex)ei->m_source); const FIndex fAdj((FIndex)ei->m_target); if (f < fAdj) // add edges only once inference.SetNeighbors(f, fAdj); } // set costs for label 0 (undefined) inference.SetDataCost((Label)0, f, MaxEnergy); } } FOREACH(f, facesDatas) { const FaceDataArr& faceDatas = facesDatas[f]; const size_t numViews = faceDatas.size(); const unsigned minSingleView = 2; // 当可用视角<=5时强制单视图 // 当视角不足时,只保留最佳视角 if (numViews <= minSingleView) { // if (true) { // 找到质量最高的视角 float maxQuality = 0; IIndex bestView = NO_ID; for (const FaceData& fd : faceDatas) { if (fd.quality > 
maxQuality) { maxQuality = fd.quality; bestView = fd.idxView; } } // 只设置最佳视角的数据项,其他设为MaxEnergy for (const FaceData& fd : faceDatas) { const Label label = (Label)fd.idxView + 1; // const float cost = (fd.idxView == bestView) ? // (1.f - fd.quality/normQuality) * MaxEnergy : // MaxEnergy; const float cost = (fd.idxView == bestView) ? (1.f - fd.quality/normQuality) * MaxEnergy : 0; inference.SetDataCost(label, f, cost); } } else { // 正常处理多视角情况 for (const FaceData& faceData : faceDatas) { const Label label = (Label)faceData.idxView + 1; const float normalizedQuality = faceData.quality/normQuality; const float dataCost = (1.f - normalizedQuality) * MaxEnergy; inference.SetDataCost(label, f, dataCost); } } } // assign the optimal view (label) to each face // (label 0 is reserved as undefined) inference.Optimize(); // extract resulting labeling labels.Memset(0xFF); FOREACH(l, labels) { const Label label(inference.GetLabel(l)); ASSERT(label < images.size()+1); if (label > 0) labels[l] = label-1; } #endif #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_TRWS // find connected components ASSERT((FIndex)boost::num_vertices(graph) == faces.size()); components.resize(faces.size()); const FIndex nComponents(boost::connected_components(graph, components.data())); // map face ID from global to component space typedef cList NodeIDs; NodeIDs nodeIDs(faces.size()); NodeIDs sizes(nComponents); sizes.Memset(0); FOREACH(c, components) nodeIDs[c] = sizes[components[c]]++; // initialize inference structures const LabelID numLabels(images.size()+1); CLISTDEFIDX(TRWSInference, FIndex) inferences(nComponents); FOREACH(s, sizes) { const NodeID numNodes(sizes[s]); ASSERT(numNodes > 0); if (numNodes <= 1) continue; TRWSInference& inference = inferences[s]; inference.Init(numNodes, numLabels); } // set data costs { // add nodes CLISTDEF0(EnergyType) D(numLabels); FOREACH(f, facesDatas) { TRWSInference& inference = inferences[components[f]]; if (inference.IsEmpty()) continue; D.MemsetValue(MaxEnergy); 
const FaceDataArr& faceDatas = facesDatas[f]; for (const FaceData& faceData: faceDatas) { const Label label((Label)faceData.idxView); const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); const EnergyType dataCost(MaxEnergy*(1.f-normalizedQuality)); D[label] = dataCost; } const NodeID nodeID(nodeIDs[f]); inference.AddNode(nodeID, D.Begin()); } // add edges EdgeOutIter ei, eie; FOREACH(f, faces) { TRWSInference& inference = inferences[components[f]]; if (inference.IsEmpty()) continue; for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { ASSERT(f == (FIndex)ei->m_source); const FIndex fAdj((FIndex)ei->m_target); ASSERT(components[f] == components[fAdj]); if (f < fAdj) // add edges only once inference.AddEdge(nodeIDs[f], nodeIDs[fAdj]); } } } // assign the optimal view (label) to each face #ifdef TEXOPT_USE_OPENMP #pragma omp parallel for schedule(dynamic) for (int i=0; i<(int)inferences.size(); ++i) { #else FOREACH(i, inferences) { #endif TRWSInference& inference = inferences[i]; if (inference.IsEmpty()) continue; inference.Optimize(); } // extract resulting labeling labels.Memset(0xFF); FOREACH(l, labels) { TRWSInference& inference = inferences[components[l]]; if (inference.IsEmpty()) continue; const Label label(inference.GetLabel(nodeIDs[l])); ASSERT(label >= 0 && label < numLabels); if (label < images.size()) labels[l] = label; } #endif } } // create texture patches { // divide graph in sub-graphs of connected faces having the same label EdgeIter ei, eie; const PairIdxArr::IDX startLabelSeamEdges(seamEdges.size()); for (boost::tie(ei, eie) = boost::edges(graph); ei != eie; ++ei) { const FIndex fSource((FIndex)ei->m_source); const FIndex fTarget((FIndex)ei->m_target); ASSERT(components.empty() || components[fSource] == components[fTarget]); if (labels[fSource] != labels[fTarget]) seamEdges.emplace_back(fSource, fTarget); } for (const PairIdx *pEdge=seamEdges.Begin()+startLabelSeamEdges, 
*pEdgeEnd=seamEdges.End(); pEdge!=pEdgeEnd; ++pEdge) boost::remove_edge(pEdge->i, pEdge->j, graph); // find connected components: texture patches ASSERT((FIndex)boost::num_vertices(graph) == faces.size()); components.resize(faces.size()); const FIndex nComponents(boost::connected_components(graph, components.data())); // create texture patches; // last texture patch contains all faces with no texture LabelArr sizes(nComponents); sizes.Memset(0); FOREACH(c, components) ++sizes[components[c]]; texturePatches.resize(nComponents+1); texturePatches.back().label = NO_ID; FOREACH(f, faces) { const Label label(labels[f]); const FIndex c(components[f]); TexturePatch& texturePatch = texturePatches[c]; ASSERT(texturePatch.label == label || texturePatch.faces.empty()); if (label == NO_ID) { texturePatch.label = NO_ID; texturePatches.back().faces.Insert(f); } else { if (texturePatch.faces.empty()) { texturePatch.label = label; texturePatch.faces.reserve(sizes[c]); } texturePatch.faces.Insert(f); } } // remove all patches with invalid label (except the last one) // and create the map from the old index to the new one mapIdxPatch.resize(nComponents); std::iota(mapIdxPatch.Begin(), mapIdxPatch.End(), 0); for (FIndex t = nComponents; t-- > 0; ) { if (texturePatches[t].label == NO_ID) { texturePatches.RemoveAtMove(t); mapIdxPatch.RemoveAtMove(t); } } const unsigned numPatches(texturePatches.size()-1); uint32_t idxPatch(0); for (IndexArr::IDX i=0; i processed(faces.size(), false); std::vector tmpVirtualFaces; // === 2. 
核心合并逻辑(基于共视相机条件)=== for (FIndex idxFace = 0; idxFace < faces.size(); ++idxFace) { if (processed[idxFace]) continue; // 创建新虚拟面片 Mesh::FaceIdxArr newVirtualFace; newVirtualFace.emplace_back(idxFace); processed[idxFace] = true; // 广度优先搜索合并相邻面 std::queue faceQueue; faceQueue.push(idxFace); while (!faceQueue.empty()) { FIndex current = faceQueue.front(); faceQueue.pop(); // 遍历相邻面 const Mesh::FaceFaces& adjFaces = faceFaces[current]; for (int i = 0; i < 3; ++i) { FIndex neighbor = adjFaces[i]; if (neighbor == NO_ID || processed[neighbor]) continue; // 关键修改:仅依赖共视条件判断 if (ShouldMergeVirtualFace(facesDatas, newVirtualFace, neighbor, minCommonCameras)) { newVirtualFace.emplace_back(neighbor); processed[neighbor] = true; faceQueue.push(neighbor); } } } // 保存有效虚拟面片 if (newVirtualFace.size() > 1) { tmpVirtualFaces.emplace_back(newVirtualFace); } else // 单一面片恢复状态 { processed[idxFace] = false; } } // === 3. 处理独立面片 === for (FIndex idxFace = 0; idxFace < faces.size(); ++idxFace) { if (processed[idxFace] || mapFaceToVirtualFace[idxFace] != NO_ID) continue; // 创建单面虚拟面片 const size_t newIdx = virtualFaces.size(); virtualFaces.emplace_back(Mesh::FaceIdxArr{idxFace}); mapFaceToVirtualFace[idxFace] = virtualFaces.size() - 1; // 更新映射和数据 // mapFaceToVirtualFace[idxFace] = newIdx; virtualFacesDatas.emplace_back(facesDatas[idxFace]); } // === 4. 
整合合并结果 ===
for (auto& vf : tmpVirtualFaces) {
	// merge the per-view data of every mesh face belonging to this virtual face
	FaceDataArr mergedData;
	for (FIndex f : vf) {
		for (auto& data : facesDatas[f]) {
			mergedData.emplace_back(data);
		}
	}
	// de-duplicate camera views (first pass: std::sort + std::unique)
	// NOTE(review): the iterator returned by std::unique ('last') is never used to
	// erase the tail, so this pass alone does not shrink the array; the manual
	// RemoveAt loop below performs the actual de-duplication
	std::sort(mergedData.begin(), mergedData.end(),
		[](const FaceData& a, const FaceData& b) {
			return a.idxView < b.idxView;
		});
	auto last = std::unique(mergedData.begin(), mergedData.end(),
		[](const FaceData& a, const FaceData& b) {
			return a.idxView == b.idxView;
		});
	// sort again (make sure identical camera IDs end up adjacent)
	mergedData.Sort([](const FaceData& a, const FaceData& b) {
		return a.idxView < b.idxView;
	});
	for (unsigned idx = 0; idx < mergedData.GetSize(); ) {
		if (idx + 1 < mergedData.GetSize() && mergedData[idx].idxView == mergedData[idx+1].idxView) {
			mergedData.RemoveAt(idx+1);
		} else {
			idx++; // advance only when no element was removed
		}
	}
	// store the result
	virtualFaces.emplace_back(vf);
	const size_t newIdx = virtualFaces.size()-1; // actual index of the virtual face just appended
	for (FIndex f : vf) {
		mapFaceToVirtualFace[f] = newIdx;
	}
	virtualFacesDatas.emplace_back(mergedData);
}
}

// === helper: check whether the candidate face may be merged into the virtual face ===
// Returns true if 'candidateFace' shares at least 'minCommonCameras' views with the
// cameras already seeing 'currentVirtualFace', or - failing that - if every camera of
// the virtual face observes the candidate through a valid (non-flagged) view.
bool MeshTexture::ShouldMergeVirtualFace(
	const MeshTexture::FaceDataViewArr& facesDatas,
	const Mesh::FaceIdxArr& currentVirtualFace,
	FIndex candidateFace,
	unsigned minCommonCameras)
{
	// 1. collect the set of cameras seeing the current virtual face
	// NOTE(review): the template argument appears lost in this copy - presumably
	// std::set<IIndex>; confirm against the upstream source
	std::set currentCams;
	for (FIndex f : currentVirtualFace) {
		for (const auto& data : facesDatas[f]) {
			currentCams.insert(data.idxView);
		}
	}
	// 2. check the co-visibility between the candidate face and the virtual face
	// NOTE(review): 'commonCount' (int) is compared against 'minCommonCameras'
	// (unsigned), a signed/unsigned comparison; harmless as written since
	// commonCount >= 1 at the comparison, but worth normalizing upstream
	int commonCount = 0;
	for (const auto& data : facesDatas[candidateFace]) {
		if (currentCams.find(data.idxView) != currentCams.end()) {
			if (++commonCount >= minCommonCameras) {
				return true;
			}
		}
	}
	// fallback: require every camera of the virtual face to have a valid
	// (non-bInvalidFacesRelative) view of the candidate face
	for (IIndex view : currentCams) {
		bool valid = false;
		for (const FaceData& fd : facesDatas[candidateFace]) {
			if (fd.idxView == view && !fd.bInvalidFacesRelative) {
				valid = true;
				break;
			}
		}
		if (!valid)
			return false; // an invalid view exists
	}
	return true;
	// return false;
}

// Select, for each mesh face, the view to texture it from (variant 3): when
// minCommonCameras > 0, faces are first grouped into "virtual faces" before the
// view-selection inference is run.
bool MeshTexture::FaceViewSelection3(
	unsigned minCommonCameras,
	float fOutlierThreshold,
	float fRatioDataSmoothness,
	int nIgnoreMaskLabel,
	const IIndexArr& views,
	bool bUseExistingUV)
{
	// extract array of triangles incident to each vertex
	ListVertexFaces(bUseExistingUV);

	// create texture patches
	{
		// printf("FaceViewSelection3 1 scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size());
		// compute face normals and smoothen them
		scene.mesh.SmoothNormalFaces();
		// printf("FaceViewSelection3 2 scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size());
		bool bUseVirtualFaces(minCommonCameras > 0);
		// list all views for each face
		FaceDataViewArr facesDatas;
		if (!ListCameraFaces(facesDatas, fOutlierThreshold, nIgnoreMaskLabel, views, bUseVirtualFaces))
			return false;
		// create faces graph
		// NOTE(review): the boost template arguments appear lost in this copy
		// (presumably boost::adjacency_list<boost::listS, boost::vecS,
		// boost::undirectedS> and graph_traits<Graph>); confirm upstream
		typedef boost::adjacency_list Graph;
		typedef boost::graph_traits::edge_iterator EdgeIter;
		typedef boost::graph_traits::out_edge_iterator EdgeOutIter;
		Graph graph;
		LabelArr labels;
		// decide per face whether to use the virtual-face algorithm
		// Mesh::FaceIdxArr virtualFacesI; // faces handled by the virtual-face algorithm
		Mesh::FaceIdxArr perFaceFaces; // faces handled by the per-face algorithm
		/*
		bool bVirtualFacesSuccess = false;
		if (bUseVirtualFaces) {
			// 1) create FaceToVirtualFaceMap
			FaceDataViewArr virtualFacesDatas;
			VirtualFaceIdxsArr virtualFaces; // stores each virtual face as an array of mesh face ID
			bVirtualFacesSuccess = CreateVirtualFaces6(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras);
			if (!virtualFaces.empty()) {
				bVirtualFacesSuccess = true;
			}
			if (!bVirtualFacesSuccess) {
				bUseVirtualFaces =
false; DEBUG_EXTRA("Warning: Failed to create virtual faces. Falling back to per-face view selection."); } } */ // construct and use virtual faces for patch creation instead of actual mesh faces; // the virtual faces are composed of coplanar triangles sharing same views if (bUseVirtualFaces) { Mesh::FaceIdxArr mapFaceToVirtualFace(faces.size()); // for each mesh face ID, store the virtual face ID witch contains it // 标记使用虚拟面算法的面片 std::vector isVirtualFace(faces.size(), true); // 1) create FaceToVirtualFaceMap FaceDataViewArr virtualFacesDatas; VirtualFaceIdxsArr virtualFaces; // stores each virtual face as an array of mesh face ID // CreateVirtualFaces(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); // CreateVirtualFaces3(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); // CreateVirtualFaces4(facesDatas, virtualFacesDatas, virtualFaces, mapFaceToVirtualFace, minCommonCameras); CreateVirtualFaces6(facesDatas, virtualFacesDatas, virtualFaces, isVirtualFace, minCommonCameras); TD_TIMER_STARTD(); // CreateVirtualFaces7(facesDatas, virtualFacesDatas, virtualFaces, isVirtualFace, minCommonCameras); DEBUG_EXTRA("CreateVirtualFaces7 completed: %s", TD_TIMER_GET_FMT().c_str()); size_t controlCounter(0); FOREACH(idxVF, virtualFaces) { const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; for (FIndex idxFace : vf) { mapFaceToVirtualFace[idxFace] = idxVF; // isVirtualFace[idxFace] = true; // virtualFacesI.push_back(idxFace); ++controlCounter; } } // 标记使用逐面算法的面片 FOREACH(f, faces) { if (isVirtualFace[f]) { perFaceFaces.push_back(f); } } // printf("perFaceFaces.size = %d\n", perFaceFaces.size()); ASSERT(controlCounter == faces.size()); // 2) create function to find virtual faces neighbors VirtualFaceIdxsArr virtualFaceNeighbors; { // for each virtual face, the list of virtual faces with at least one vertex in common virtualFaceNeighbors.resize(virtualFaces.size()); //* FOREACH(idxVF, virtualFaces) { const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; 
Mesh::FaceIdxArr& vfNeighbors = virtualFaceNeighbors[idxVF]; for (FIndex idxFace : vf) { const Mesh::FaceFaces& adjFaces = faceFaces[idxFace]; for (int i = 0; i < 3; ++i) { const FIndex fAdj(adjFaces[i]); if (fAdj == NO_ID) continue; if (mapFaceToVirtualFace[fAdj] == idxVF) continue; if (fAdj != idxFace && vfNeighbors.Find(mapFaceToVirtualFace[fAdj]) == Mesh::FaceIdxArr::NO_INDEX) { vfNeighbors.emplace_back(mapFaceToVirtualFace[fAdj]); } } } } //*/ } // 3) use virtual faces to build the graph // 4) assign images to virtual faces // 5) spread image ID to each mesh face from virtual face FOREACH(idxFace, virtualFaces) { MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); ASSERT(idx == idxFace); } //* FOREACH(idxVirtualFace, virtualFaces) { const Mesh::FaceIdxArr& afaces = virtualFaceNeighbors[idxVirtualFace]; for (FIndex idxVirtualFaceAdj: afaces) { if (idxVirtualFace >= idxVirtualFaceAdj) continue; const bool bInvisibleFace(virtualFacesDatas[idxVirtualFace].empty()); const bool bInvisibleFaceAdj(virtualFacesDatas[idxVirtualFaceAdj].empty()); if (bInvisibleFace || bInvisibleFaceAdj) continue; boost::add_edge(idxVirtualFace, idxVirtualFaceAdj, graph); } } //*/ // 这里通过深度图判断virtualFaces是否要为invalid // CheckInvalidFaces(virtualFacesDatas, fOutlierThreshold, nIgnoreMaskLabel, views, bUseVirtualFaces)) printf("virtualFacesDatas.size()=%d, facesDatas.size()=%d\n", virtualFacesDatas.size(), facesDatas.size()); ASSERT((Mesh::FIndex)boost::num_vertices(graph) == virtualFaces.size()); // assign the best view to each face labels.resize(faces.size()); labelsInvalid.resize(faces.size()); FOREACH(l, labelsInvalid) { labelsInvalid[l] = NO_ID; } { // normalize quality values float maxQuality(0); for (const FaceDataArr& faceDatas: virtualFacesDatas) { for (const FaceData& faceData: faceDatas) if (maxQuality < faceData.quality) maxQuality = faceData.quality; } Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); for (const FaceDataArr& faceDatas: 
virtualFacesDatas) { for (const FaceData& faceData: faceDatas) hist.Add(faceData.quality); } // const float normQuality(hist.GetApproximatePermille(0.95f)); const float normQuality(hist.GetApproximatePermille(0.8f)); #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP // initialize inference structures const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); LBPInference inference; { inference.SetNumNodes(virtualFaces.size()); inference.SetSmoothCost(SmoothnessPotts); EdgeOutIter ei, eie; FOREACH(f, virtualFaces) { for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { ASSERT(f == (FIndex)ei->m_source); const FIndex fAdj((FIndex)ei->m_target); if (f < fAdj) // add edges only once inference.SetNeighbors(f, fAdj); } // set costs for label 0 (undefined) inference.SetDataCost((Label)0, f, MaxEnergy); // inference.SetDataCost((Label)0, f, 0); } } /* // set data costs for all labels (except label 0 - undefined) FOREACH(f, virtualFacesDatas) { const FaceDataArr& faceDatas = virtualFacesDatas[f]; for (const FaceData& faceData: faceDatas) { const Label label((Label)faceData.idxView+1); const float normalizedQuality(faceData.quality>=normQuality ? 
1.f : faceData.quality/normQuality); const float dataCost((1.f-normalizedQuality)*MaxEnergy); inference.SetDataCost(label, f, dataCost); } } //*/ //* FOREACH(f, virtualFacesDatas) { const FaceDataArr& faceDatas = virtualFacesDatas[f]; const size_t numViews = faceDatas.size(); const unsigned minSingleView = 6; // 当可用视角<=2时强制单视图 bool bInvalidFacesRelative = false; IIndex invalidView; float invalidQuality; // 当视角不足时,只保留最佳视角 if (numViews <= minSingleView) { // if (true) { std::vector> sortedViews; std::vector> sortedViews2; sortedViews.reserve(faceDatas.size()); sortedViews2.reserve(faceDatas.size()); for (const FaceData& fd : faceDatas) { if (fd.bInvalidFacesRelative) { bInvalidFacesRelative = true; // sortedViews.emplace_back(fd.quality, fd.idxView); invalidView = fd.idxView; invalidQuality = fd.quality; // const Label label = (Label)fd.idxView + 1; // inference.SetDataCost(label, f, MaxEnergy); // sortedViews.emplace_back(fd.quality, fd.idxView); sortedViews2.emplace_back(fd.quality, fd.color); } else { // if (fd.quality<=999.0) { sortedViews.emplace_back(fd.quality, fd.idxView); sortedViews2.emplace_back(fd.quality, fd.color); // printf("1fd.quality=%f\n", fd.quality); } // else // printf("2fd.quality=%f\n", fd.quality); } } std::sort(sortedViews.begin(), sortedViews.end(), [](const auto& a, const auto& b) { return a.first > b.first; }); std::sort(sortedViews2.begin(), sortedViews2.end(), [](const auto& a, const auto& b) { return a.first > b.first; }); // 设置数据成本:最佳视角成本最低,其他按质量排序递增 const float baseCostScale = 0.1f; // 基础成本缩放系数 const float costStep = 0.3f; // 相邻视角成本增量 if (bInvalidFacesRelative && sortedViews.size() == 0) // if (bInvalidFacesRelative) { // const Label label = (Label)sortedViews[0].second + 1; const Label label = (Label)invalidView + 1; float cost = (1.f - invalidQuality / normQuality) * MaxEnergy; cost = 0; inference.SetDataCost(label, f, cost); continue; } /* int nSize = sortedViews2.size(); float totalQuality = 0.0f; Color totalColor(0,0,0); for 
(int n = 0; n < nSize; ++n) { totalQuality += sortedViews2[n].first; totalColor += sortedViews2[n].second; } const float avgQuality = totalQuality / nSize; const Color avgColor = totalColor / nSize; if (sortedViews2.size()<=0) continue; // printf("sortedViews2.size=%d\n", sortedViews2.size()); const Color medianColor = ComputeMedianColorAndQuality(sortedViews2).color; const float medianQuality = ComputeMedianColorAndQuality(sortedViews2).quality; //*/ for (size_t i = 0; i < sortedViews.size(); ++i) { const Label label = (Label)sortedViews[i].second + 1; float cost; // 过滤不可见的面 std::string strPath = images[label-1].name; size_t lastSlash = strPath.find_last_of("/\\"); if (lastSlash == std::string::npos) lastSlash = 0; // 若无分隔符,从头开始 else lastSlash++; // 跳过分隔符 // 查找扩展名分隔符 '.' 的位置 size_t lastDot = strPath.find_last_of('.'); if (lastDot == std::string::npos) lastDot = strPath.size(); // 若无扩展名,截到末尾 // 截取文件名(不含路径和扩展名) std::string strName = strPath.substr(lastSlash, lastDot - lastSlash); // if (!scene.is_face_visible(strName.c_str(), f)) // continue; // 过滤不可见的面 /* const Color& viewColor = sortedViews2[i].second; // float colorDistance = cv::norm(avgColor - viewColor); // if (colorDistance>0.0001) // if (nSize>0) // printf("colorDistance=%f, nSize=%d, %f, %f, %f, %f, %f, %f\n", colorDistance, nSize, // avgColor.x, avgColor.y, avgColor.z, viewColor.x, viewColor.y, viewColor.z); // if (colorDistance>0.000) // continue; float colorDistance = cv::norm(viewColor - medianColor); if (colorDistance>0.0000) printf("colorDistance=%f, nSize=%d, i=%d, %f, %f, %f, %f, %f, %f\n", colorDistance, nSize, i, medianColor.x, medianColor.y, medianColor.z, viewColor.x, viewColor.y, viewColor.z); // float luminanceDistance = std::abs(viewLuminance - medianLuminance); if (colorDistance>10.0000) continue; //*/ if (i == 0) { // 最佳视角 // cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy * baseCostScale; cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy; } else { // 
其他视角:成本随排名线性增加 int stepIndex = i; // if (i > 3) // stepIndex = i - 3; cost = MaxEnergy * (baseCostScale + costStep * stepIndex); // 确保成本不超过MaxEnergy cost = std::min(cost, MaxEnergy); } inference.SetDataCost(label, f, cost); } // // 找到质量最高的视角 // float maxQuality = 0; // IIndex bestView = NO_ID; // for (const FaceData& fd : faceDatas) { // if (fd.quality > maxQuality) { // maxQuality = fd.quality; // bestView = fd.idxView; // } // } // // 只设置最佳视角的数据项,其他设为MaxEnergy // for (const FaceData& fd : faceDatas) { // const Label label = (Label)fd.idxView + 1; // // const float cost = (fd.idxView == bestView) ? // // (1.f - fd.quality/normQuality) * MaxEnergy : // // MaxEnergy; // const float cost = (fd.idxView == bestView) ? // (1.f - fd.quality/normQuality) * MaxEnergy : // MaxEnergy; // inference.SetDataCost(label, f, cost); // } } else { /* for (const FaceData& faceData: faceDatas) { const Label label((Label)faceData.idxView+1); const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); // const float normalizedQuality = faceData.quality/normQuality; const float dataCost((1.f-normalizedQuality)*MaxEnergy); inference.SetDataCost(label, f, dataCost); } */ for (const FaceData& faceData: faceDatas) { const Label label((Label)faceData.idxView+1); const float cost = (faceData.quality>=normQuality) ? 
(1.f - faceData.quality/normQuality) * MaxEnergy : 0; inference.SetDataCost(label, f, cost); } } } //*/ // assign the optimal view (label) to each face // (label 0 is reserved as undefined) #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP // 初始化后添加 DEBUG_EXTRA("Starting LBP optimization with %d nodes", inference.GetNumNodes()); inference.Optimize(); DEBUG_EXTRA("LBP optimization finished"); #endif // extract resulting labeling LabelArr virtualLabels(virtualFaces.size()); virtualLabels.Memset(0xFF); FOREACH(l, virtualLabels) { const Label label(inference.GetLabel(l)); ASSERT(label < images.size()+1); if (label > 0) virtualLabels[l] = label-1; } /* FOREACH(l, labels) { labels[l] = virtualLabels[mapFaceToVirtualFace[l]]; } */ /* if (!perFaceFaces.empty()) { FOREACH(f, perFaceFaces) { FaceDataArr& faceData = facesDatas[f]; if (faceData.empty()) continue; // 选择最佳视图 float bestQuality = -1; IIndex bestView = NO_ID; for (const FaceData& data : faceData) { if (data.quality > bestQuality) { bestQuality = data.quality; bestView = data.idxView; } } labels[f] = bestView; } } //*/ //* // 修改后安全版本 FOREACH(l, labels) { if (l < mapFaceToVirtualFace.size()) { const size_t virtualIdx = mapFaceToVirtualFace[l]; if (virtualIdx < virtualLabels.size()) { labels[l] = virtualLabels[virtualIdx]; } else { labels[l] = NO_ID; DEBUG_EXTRA("Warning: Invalid virtual face index for face %u: %u (max: %u)", l, virtualIdx, virtualLabels.size()-1); } } else { labels[l] = NO_ID; DEBUG_EXTRA("Warning: Face index out of bounds: %u (max: %u)", l, mapFaceToVirtualFace.size()-1); } } //*/ // 修改后安全版本 /* FOREACH(l, labels) { if (l < mapFaceToVirtualFace.size()) { const size_t virtualIdx = mapFaceToVirtualFace[l]; if (virtualIdx < virtualLabels.size()) { labels[l] = virtualLabels[virtualIdx]; } else { // 虚拟面映射失败,回退到非虚拟面方法:选择最佳视图 const FaceDataArr& faceDatas = facesDatas[l]; if (!faceDatas.empty()) { // 找到质量最高的视角 float maxQuality = -1; IIndex bestView = NO_ID; for (const FaceData& fd : faceDatas) { if (fd.quality > 
maxQuality && !fd.bInvalidFacesRelative) { maxQuality = fd.quality; bestView = fd.idxView; } } labels[l] = bestView; } else { labels[l] = NO_ID; } DEBUG_EXTRA("Warning: Invalid virtual face index for face %u: %u (max: %u) - using best view %u", l, virtualIdx, virtualLabels.size()-1, labels[l]); } } else { // 面片索引越界,同样回退到非虚拟面方法 const FaceDataArr& faceDatas = facesDatas[l]; if (!faceDatas.empty()) { float maxQuality = -1; IIndex bestView = NO_ID; for (const FaceData& fd : faceDatas) { if (fd.quality > maxQuality && !fd.bInvalidFacesRelative) { maxQuality = fd.quality; bestView = fd.idxView; } } labels[l] = bestView; } else { labels[l] = NO_ID; } DEBUG_EXTRA("Warning: Face index out of bounds: %u (max: %u) - using best view %u", l, mapFaceToVirtualFace.size()-1, labels[l]); } } */ #endif } graph.clear(); //* // 标记虚拟面边界为接缝 FOREACH(idxVF, virtualFaces) { const auto& vf = virtualFaces[idxVF]; for (FIndex fid : vf) { const auto& adjFaces = faceFaces[fid]; for (int i=0; i<3; ++i) { if (adjFaces[i] == NO_ID) continue; const FIndex adjVF = mapFaceToVirtualFace[adjFaces[i]]; if (adjVF != idxVF) { seamEdges.emplace_back(fid, adjFaces[i]); } } } } //*/ } /* #if TEXOPT_USE_ANISOTROPIC const int anisoLevel = 8; // 设置各向异性过滤级别 for (auto& tex : textures) { tex.SetFilterMode(Texture::ANISOTROPIC); tex.SetAnisotropy(anisoLevel); } #endif //*/ // create the graph of faces: each vertex is a face and the edges are the edges shared by the faces FOREACH(idxFace, faces) { MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); ASSERT(idx == idxFace); } FOREACH(idxFace, faces) { const Mesh::FaceFaces& afaces = faceFaces[idxFace]; for (int v=0; v<3; ++v) { const FIndex idxFaceAdj = afaces[v]; if (idxFaceAdj == NO_ID || idxFace >= idxFaceAdj) continue; const bool bInvisibleFace(facesDatas[idxFace].empty()); const bool bInvisibleFaceAdj(facesDatas[idxFaceAdj].empty()); if (bInvisibleFace || bInvisibleFaceAdj) { if (bInvisibleFace != bInvisibleFaceAdj) 
seamEdges.emplace_back(idxFace, idxFaceAdj); continue; } boost::add_edge(idxFace, idxFaceAdj, graph); } } faceFaces.Release(); ASSERT((Mesh::FIndex)boost::num_vertices(graph) == faces.size()); LOG_OUT() << "bUseVirtualFaces=" << bUseVirtualFaces << std::endl; // start patch creation starting directly from individual faces if (bUseVirtualFaces) // if (false) { // normalize quality values float maxQuality(0); /* for (const FaceDataArr& faceDatas: facesDatas) { for (const FaceData& faceData: faceDatas) if (maxQuality < faceData.quality) maxQuality = faceData.quality; } */ FOREACH(idxFace, facesDatas) { if (labels[idxFace] != NO_ID) continue; const FaceDataArr& faceDataArr = facesDatas[idxFace]; for (const FaceData& faceData : faceDataArr) { if (maxQuality < faceData.quality) maxQuality = faceData.quality; } } Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); /* for (const FaceDataArr& faceDatas: facesDatas) { for (const FaceData& faceData: faceDatas) hist.Add(faceData.quality); } */ FOREACH(idxFace, facesDatas) { if (labels[idxFace] != NO_ID) continue; const FaceDataArr& faceDataArr = facesDatas[idxFace]; for (const FaceData& faceData : faceDataArr) { hist.Add(faceData.quality); } } const float normQuality(hist.GetApproximatePermille(0.95f)); #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP // initialize inference structures const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); LBPInference inference; { inference.SetNumNodes(faces.size()); inference.SetSmoothCost(SmoothnessPotts); // inference.SetSmoothCost(SmoothnessLinear); // inference.SetSmoothCost(NewSmoothness); EdgeOutIter ei, eie; FOREACH(f, faces) { for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { ASSERT(f == (FIndex)ei->m_source); const FIndex fAdj((FIndex)ei->m_target); if (f < fAdj) // add edges only once inference.SetNeighbors(f, fAdj); } // set costs for label 0 (undefined) inference.SetDataCost((Label)0, f, 
MaxEnergy); } } /* for (const FaceDataArr& faceDatas : facesDatas) { for (const FaceData& faceData : faceDatas) { if (faceData.quality > maxQuality) maxQuality = faceData.quality; } } for (const FaceDataArr& faceDatas : facesDatas) { for (const FaceData& faceData : faceDatas) hist.Add(faceData.quality); } */ FOREACH(f, faces) { if (labels[f] != NO_ID) { const Label assignedLabel = (Label)(labels[f] + 1); inference.SetDataCost(assignedLabel, f, 0); } } FOREACH(f, facesDatas) { if (labels[f] != NO_ID) continue; const FaceDataArr& faceDatas = facesDatas[f]; const size_t numViews = faceDatas.size(); const unsigned minSingleView = 1; // 与虚拟面模式相同的阈值 bool bInvalidFacesRelative = false; IIndex invalidView; float invalidQuality; { std::vector> sortedViews; sortedViews.reserve(faceDatas.size()); for (const FaceData& fd : faceDatas) { if (fd.bInvalidFacesRelative) { bInvalidFacesRelative = true; // sortedViews.emplace_back(fd.quality, fd.idxView); invalidView = fd.idxView; invalidQuality = fd.quality; } else { // if (fd.quality<=999.0) { sortedViews.emplace_back(fd.quality, fd.idxView); // printf("1fd.quality=%f\n", fd.quality); } // else // printf("2fd.quality=%f\n", fd.quality); } } std::sort(sortedViews.begin(), sortedViews.end(), [](const auto& a, const auto& b) { return a.first > b.first; }); // 设置数据成本:最佳视角成本最低,其他按质量排序递增 const float baseCostScale = 0.1f; // 基础成本缩放系数 const float costStep = 0.3f; // 相邻视角成本增量 for (const auto& image : images) { // printf("image name=%s\n", image.name.c_str()); } if (bInvalidFacesRelative && sortedViews.size() == 0) { // const Label label = (Label)sortedViews[0].second + 1; const Label label = (Label)invalidView + 1; float cost = (1.f - invalidQuality / normQuality) * MaxEnergy; // float cost = 0; inference.SetDataCost(label, f, cost); continue; } // printf("sortedViews size=%d\n", sortedViews.size()); for (size_t i = 0; i < sortedViews.size(); ++i) { const Label label = (Label)sortedViews[i].second + 1; float cost; std::string strPath = 
images[label-1].name; size_t lastSlash = strPath.find_last_of("/\\"); if (lastSlash == std::string::npos) lastSlash = 0; // 若无分隔符,从头开始 else lastSlash++; // 跳过分隔符 // 查找扩展名分隔符 '.' 的位置 size_t lastDot = strPath.find_last_of('.'); if (lastDot == std::string::npos) lastDot = strPath.size(); // 若无扩展名,截到末尾 // 截取文件名(不含路径和扩展名) std::string strName = strPath.substr(lastSlash, lastDot - lastSlash); if (i == 0) { // if (true) { // 最佳视角 // cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy * baseCostScale; cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy; // cost = 0; inference.SetDataCost(label, f, cost); } else { // 其他视角:成本随排名线性增加 int stepIndex = i; // if (i > 3) // stepIndex = i - 3; cost = MaxEnergy * (baseCostScale + costStep * stepIndex); // 确保成本不超过MaxEnergy cost = std::min(cost, MaxEnergy); // cost = MaxEnergy; inference.SetDataCost(label, f, cost); } } } } // assign the optimal view (label) to each face // (label 0 is reserved as undefined) // inference.Optimize(); // extract resulting labeling FOREACH(l, labels) { if (labels[l] != NO_ID) continue; const Label label(inference.GetLabel(l)); ASSERT(label < images.size()+1); if (label > 0) { labels[l] = label-1; labelsInvalid[l] = labels[l]; } } #endif } if (!bUseVirtualFaces) { // assign the best view to each face labels.resize(faces.size()); { // normalize quality values float maxQuality(0); for (const FaceDataArr& faceDatas: facesDatas) { for (const FaceData& faceData: faceDatas) if (maxQuality < faceData.quality) maxQuality = faceData.quality; } Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); for (const FaceDataArr& faceDatas: facesDatas) { for (const FaceData& faceData: faceDatas) hist.Add(faceData.quality); } const float normQuality(hist.GetApproximatePermille(0.95f)); #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP // initialize inference structures const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); LBPInference inference; { 
inference.SetNumNodes(faces.size()); inference.SetSmoothCost(SmoothnessPotts); // inference.SetSmoothCost(SmoothnessLinear); // inference.SetSmoothCost(NewSmoothness); EdgeOutIter ei, eie; FOREACH(f, faces) { for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { ASSERT(f == (FIndex)ei->m_source); const FIndex fAdj((FIndex)ei->m_target); if (f < fAdj) // add edges only once inference.SetNeighbors(f, fAdj); } // set costs for label 0 (undefined) inference.SetDataCost((Label)0, f, MaxEnergy); } } //* for (const FaceDataArr& faceDatas : facesDatas) { for (const FaceData& faceData : faceDatas) { if (faceData.quality > maxQuality) maxQuality = faceData.quality; } } for (const FaceDataArr& faceDatas : facesDatas) { for (const FaceData& faceData : faceDatas) hist.Add(faceData.quality); } FOREACH(f, facesDatas) { // if (scene.mesh.invalidFacesRelative.data.contains(f)) // continue; const FaceDataArr& faceDatas = facesDatas[f]; const size_t numViews = faceDatas.size(); const unsigned minSingleView = 6; // 与虚拟面模式相同的阈值 bool bInvalidFacesRelative = false; IIndex invalidView; float invalidQuality; // if (numViews <= minSingleView) { if (true) { std::vector> sortedViews; sortedViews.reserve(faceDatas.size()); for (const FaceData& fd : faceDatas) { if (fd.bInvalidFacesRelative) { bInvalidFacesRelative = true; // sortedViews.emplace_back(fd.quality, fd.idxView); invalidView = fd.idxView; invalidQuality = fd.quality; } else { // if (fd.quality<=999.0) { sortedViews.emplace_back(fd.quality, fd.idxView); // printf("1fd.quality=%f\n", fd.quality); } // else // printf("2fd.quality=%f\n", fd.quality); } } std::sort(sortedViews.begin(), sortedViews.end(), [](const auto& a, const auto& b) { return a.first > b.first; }); // 设置数据成本:最佳视角成本最低,其他按质量排序递增 const float baseCostScale = 0.1f; // 基础成本缩放系数 const float costStep = 0.3f; // 相邻视角成本增量 for (const auto& image : images) { // printf("image name=%s\n", image.name.c_str()); } if (bInvalidFacesRelative && sortedViews.size() == 
0) { // const Label label = (Label)sortedViews[0].second + 1; const Label label = (Label)invalidView + 1; float cost = (1.f - invalidQuality / normQuality) * MaxEnergy; // float cost = 0; inference.SetDataCost(label, f, cost); continue; } // printf("sortedViews size=%d\n", sortedViews.size()); for (size_t i = 0; i < sortedViews.size(); ++i) { const Label label = (Label)sortedViews[i].second + 1; float cost; std::string strPath = images[label-1].name; size_t lastSlash = strPath.find_last_of("/\\"); if (lastSlash == std::string::npos) lastSlash = 0; // 若无分隔符,从头开始 else lastSlash++; // 跳过分隔符 // 查找扩展名分隔符 '.' 的位置 size_t lastDot = strPath.find_last_of('.'); if (lastDot == std::string::npos) lastDot = strPath.size(); // 若无扩展名,截到末尾 // 截取文件名(不含路径和扩展名) std::string strName = strPath.substr(lastSlash, lastDot - lastSlash); if (i == 0) { // if (true) { // 最佳视角 // cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy * baseCostScale; cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy; // cost = 0; inference.SetDataCost(label, f, cost); } else { // 其他视角:成本随排名线性增加 int stepIndex = i; // if (i > 3) // stepIndex = i - 3; cost = MaxEnergy * (baseCostScale + costStep * stepIndex); // 确保成本不超过MaxEnergy cost = std::min(cost, MaxEnergy); // cost = MaxEnergy; inference.SetDataCost(label, f, cost); } } } else { for (const FaceData& fd : faceDatas) { const Label label = (Label)fd.idxView + 1; const float normalizedQuality = fd.quality / normQuality; const float cost = (1.f - normalizedQuality) * MaxEnergy; inference.SetDataCost(label, f, cost); } } } //*/ // assign the optimal view (label) to each face // (label 0 is reserved as undefined) inference.Optimize(); // extract resulting labeling labels.Memset(0xFF); FOREACH(l, labels) { const Label label(inference.GetLabel(l)); ASSERT(label < images.size()+1); if (label > 0) labels[l] = label-1; } #endif #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_TRWS // find connected components ASSERT((FIndex)boost::num_vertices(graph) == 
faces.size()); components.resize(faces.size()); const FIndex nComponents(boost::connected_components(graph, components.data())); // map face ID from global to component space typedef cList NodeIDs; NodeIDs nodeIDs(faces.size()); NodeIDs sizes(nComponents); sizes.Memset(0); FOREACH(c, components) nodeIDs[c] = sizes[components[c]]++; // initialize inference structures const LabelID numLabels(images.size()+1); CLISTDEFIDX(TRWSInference, FIndex) inferences(nComponents); FOREACH(s, sizes) { const NodeID numNodes(sizes[s]); ASSERT(numNodes > 0); if (numNodes <= 1) continue; TRWSInference& inference = inferences[s]; inference.Init(numNodes, numLabels); } // set data costs { // add nodes CLISTDEF0(EnergyType) D(numLabels); FOREACH(f, facesDatas) { TRWSInference& inference = inferences[components[f]]; if (inference.IsEmpty()) continue; D.MemsetValue(MaxEnergy); const FaceDataArr& faceDatas = facesDatas[f]; for (const FaceData& faceData: faceDatas) { const Label label((Label)faceData.idxView); const float normalizedQuality(faceData.quality>=normQuality ? 
1.f : faceData.quality/normQuality); const EnergyType dataCost(MaxEnergy*(1.f-normalizedQuality)); D[label] = dataCost; } const NodeID nodeID(nodeIDs[f]); inference.AddNode(nodeID, D.Begin()); } // add edges EdgeOutIter ei, eie; FOREACH(f, faces) { TRWSInference& inference = inferences[components[f]]; if (inference.IsEmpty()) continue; for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { ASSERT(f == (FIndex)ei->m_source); const FIndex fAdj((FIndex)ei->m_target); ASSERT(components[f] == components[fAdj]); if (f < fAdj) // add edges only once inference.AddEdge(nodeIDs[f], nodeIDs[fAdj]); } } } // assign the optimal view (label) to each face #ifdef TEXOPT_USE_OPENMP #pragma omp parallel for schedule(dynamic) for (int i=0; i<(int)inferences.size(); ++i) { #else FOREACH(i, inferences) { #endif TRWSInference& inference = inferences[i]; if (inference.IsEmpty()) continue; inference.Optimize(); } // extract resulting labeling labels.Memset(0xFF); FOREACH(l, labels) { TRWSInference& inference = inferences[components[l]]; if (inference.IsEmpty()) continue; const Label label(inference.GetLabel(nodeIDs[l])); ASSERT(label >= 0 && label < numLabels); if (label < images.size()) labels[l] = label; } #endif } } // create texture patches { // divide graph in sub-graphs of connected faces having the same label EdgeIter ei, eie; const PairIdxArr::IDX startLabelSeamEdges(seamEdges.size()); for (boost::tie(ei, eie) = boost::edges(graph); ei != eie; ++ei) { const FIndex fSource((FIndex)ei->m_source); const FIndex fTarget((FIndex)ei->m_target); ASSERT(components.empty() || components[fSource] == components[fTarget]); if (labels[fSource] != labels[fTarget]) seamEdges.emplace_back(fSource, fTarget); } for (const PairIdx *pEdge=seamEdges.Begin()+startLabelSeamEdges, *pEdgeEnd=seamEdges.End(); pEdge!=pEdgeEnd; ++pEdge) boost::remove_edge(pEdge->i, pEdge->j, graph); // find connected components: texture patches ASSERT((FIndex)boost::num_vertices(graph) == faces.size()); 
components.resize(faces.size()); const FIndex nComponents(boost::connected_components(graph, components.data())); // create texture patches; // last texture patch contains all faces with no texture LabelArr sizes(nComponents); sizes.Memset(0); FOREACH(c, components) ++sizes[components[c]]; texturePatches.resize(nComponents+1); texturePatches.back().label = NO_ID; FOREACH(f, faces) { const Label label(labels[f]); const FIndex c(components[f]); TexturePatch& texturePatch = texturePatches[c]; ASSERT(texturePatch.label == label || texturePatch.faces.empty()); if (label == NO_ID) { texturePatch.label = NO_ID; texturePatches.back().faces.Insert(f); } else { if ((labelsInvalid[f] != NO_ID) && false) { if (texturePatch.faces.empty()) { texturePatch.label = label; // texturePatch.faces.reserve(sizes[c]); texturePatch.faces.reserve(sizes[c]); } texturePatch.faces = {f}; } else { if (texturePatch.faces.empty()) { texturePatch.label = label; texturePatch.faces.reserve(sizes[c]); } texturePatch.faces.Insert(f); } } } // remove all patches with invalid label (except the last one) // and create the map from the old index to the new one mapIdxPatch.resize(nComponents); std::iota(mapIdxPatch.Begin(), mapIdxPatch.End(), 0); for (FIndex t = nComponents; t-- > 0; ) { if (texturePatches[t].label == NO_ID) { texturePatches.RemoveAtMove(t); mapIdxPatch.RemoveAtMove(t); } } const unsigned numPatches(texturePatches.size()-1); uint32_t idxPatch(0); for (IndexArr::IDX i=0; iempty()) { remainingFaces = *faceIndices; } else { remainingFaces.resize(faces.size()); std::iota(remainingFaces.begin(), remainingFaces.end(), 0); } const bool bUseVirtualFaces(minCommonCameras > 0); // extract array of triangles incident to each vertex ListVertexFaces(); // create texture patches { // printf("FaceViewSelection3 1 scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size()); // compute face normals and smoothen them scene.mesh.SmoothNormalFaces(); // printf("FaceViewSelection3 2 
scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size()); // list all views for each face FaceDataViewArr facesDatas; if (!ListCameraFaces(facesDatas, fOutlierThreshold, nIgnoreMaskLabel, views, bUseVirtualFaces)) return false; // create faces graph typedef boost::adjacency_list Graph; typedef boost::graph_traits::edge_iterator EdgeIter; typedef boost::graph_traits::out_edge_iterator EdgeOutIter; Graph graph; LabelArr labels; // construct and use virtual faces for patch creation instead of actual mesh faces; // the virtual faces are composed of coplanar triangles sharing same views if (bUseVirtualFaces) { Mesh::FaceIdxArr mapFaceToVirtualFace(faces.size()); // for each mesh face ID, store the virtual face ID witch contains it // 1) create FaceToVirtualFaceMap FaceDataViewArr virtualFacesDatas; VirtualFaceIdxsArr virtualFaces; // stores each virtual face as an array of mesh face ID CreateVirtualFaces5(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); size_t controlCounter(0); FOREACH(idxVF, virtualFaces) { const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; for (FIndex idxFace : vf) { mapFaceToVirtualFace[idxFace] = idxVF; ++controlCounter; } } ASSERT(controlCounter == faces.size()); // 2) create function to find virtual faces neighbors VirtualFaceIdxsArr virtualFaceNeighbors; { // for each virtual face, the list of virtual faces with at least one vertex in common virtualFaceNeighbors.resize(virtualFaces.size()); FOREACH(idxVF, virtualFaces) { const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; Mesh::FaceIdxArr& vfNeighbors = virtualFaceNeighbors[idxVF]; for (FIndex idxFace : vf) { const Mesh::FaceFaces& adjFaces = faceFaces[idxFace]; for (int i = 0; i < 3; ++i) { const FIndex fAdj(adjFaces[i]); if (fAdj == NO_ID) continue; if (mapFaceToVirtualFace[fAdj] == idxVF) continue; if (fAdj != idxFace && vfNeighbors.Find(mapFaceToVirtualFace[fAdj]) == Mesh::FaceIdxArr::NO_INDEX) { vfNeighbors.emplace_back(mapFaceToVirtualFace[fAdj]); } } } } } // 3) use virtual 
faces to build the graph FOREACH(idxFace, virtualFaces) { MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); ASSERT(idx == idxFace); } FOREACH(idxVirtualFace, virtualFaces) { const Mesh::FaceIdxArr& afaces = virtualFaceNeighbors[idxVirtualFace]; for (FIndex idxVirtualFaceAdj: afaces) { if (idxVirtualFace >= idxVirtualFaceAdj) continue; const bool bInvisibleFace(virtualFacesDatas[idxVirtualFace].empty()); const bool bInvisibleFaceAdj(virtualFacesDatas[idxVirtualFaceAdj].empty()); if (bInvisibleFace || bInvisibleFaceAdj) continue; boost::add_edge(idxVirtualFace, idxVirtualFaceAdj, graph); } } printf("virtualFacesDatas.size()=%d, facesDatas.size()=%d\n", virtualFacesDatas.size(), facesDatas.size()); ASSERT((Mesh::FIndex)boost::num_vertices(graph) == virtualFaces.size()); // assign the best view to each face labels.resize(faces.size()); { // normalize quality values float maxQuality(0); for (const FaceDataArr& faceDatas: virtualFacesDatas) { for (const FaceData& faceData: faceDatas) if (maxQuality < faceData.quality) maxQuality = faceData.quality; } Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); for (const FaceDataArr& faceDatas: virtualFacesDatas) { for (const FaceData& faceData: faceDatas) hist.Add(faceData.quality); } const float normQuality(hist.GetApproximatePermille(0.8f)); #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP // initialize inference structures const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); LBPInference inference; { inference.SetNumNodes(virtualFaces.size()); inference.SetSmoothCost(SmoothnessPotts); EdgeOutIter ei, eie; FOREACH(f, virtualFaces) { for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { ASSERT(f == (FIndex)ei->m_source); const FIndex fAdj((FIndex)ei->m_target); if (f < fAdj) // add edges only once inference.SetNeighbors(f, fAdj); } // set costs for label 0 (undefined) inference.SetDataCost((Label)0, f, MaxEnergy); } } 
FOREACH(f, virtualFacesDatas) { const FaceDataArr& faceDatas = virtualFacesDatas[f]; const size_t numViews = faceDatas.size(); // 跳过无效面片(bInvalidFacesRelative=true) bool hasValidView = false; for (const FaceData& fd : faceDatas) { if (!fd.bInvalidFacesRelative) { hasValidView = true; break; } } if (!hasValidView) { for (const FaceData& faceData: faceDatas) { // 跳过无效视图 if (faceData.bInvalidFacesRelative) continue; const Label label((Label)faceData.idxView+1); const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); float dataCost((1.f-normalizedQuality)*MaxEnergy); dataCost = MaxEnergy; inference.SetDataCost(label, f, dataCost); } continue; } for (const FaceData& faceData: faceDatas) { // 跳过无效视图 if (faceData.bInvalidFacesRelative) continue; const Label label((Label)faceData.idxView+1); const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); const float dataCost((1.f-normalizedQuality)*MaxEnergy); inference.SetDataCost(label, f, dataCost); } } // assign the optimal view (label) to each face inference.Optimize(); // extract resulting labeling LabelArr virtualLabels(virtualFaces.size()); virtualLabels.Memset(0xFF); FOREACH(l, virtualLabels) { const Label label(inference.GetLabel(l)); ASSERT(label < images.size()+1); if (label > 0) virtualLabels[l] = label-1; } // 安全地将虚拟面标签映射到网格面 FOREACH(l, labels) { if (l < mapFaceToVirtualFace.size()) { const size_t virtualIdx = mapFaceToVirtualFace[l]; if (virtualIdx < virtualLabels.size()) { labels[l] = virtualLabels[virtualIdx]; } else { labels[l] = NO_ID; } } else { labels[l] = NO_ID; } } #endif } graph.clear(); // 标记虚拟面边界为接缝 FOREACH(idxVF, virtualFaces) { const auto& vf = virtualFaces[idxVF]; for (FIndex fid : vf) { const auto& adjFaces = faceFaces[fid]; for (int i=0; i<3; ++i) { if (adjFaces[i] == NO_ID) continue; const FIndex adjVF = mapFaceToVirtualFace[adjFaces[i]]; if (adjVF != idxVF) { seamEdges.emplace_back(fid, adjFaces[i]); } } } } } // 
create the graph of faces: each vertex is a face and the edges are the edges shared by the faces FOREACH(idxFace, faces) { MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); ASSERT(idx == idxFace); } FOREACH(idxFace, faces) { const Mesh::FaceFaces& afaces = faceFaces[idxFace]; for (int v=0; v<3; ++v) { const FIndex idxFaceAdj = afaces[v]; if (idxFaceAdj == NO_ID || idxFace >= idxFaceAdj) continue; const bool bInvisibleFace(facesDatas[idxFace].empty()); const bool bInvisibleFaceAdj(facesDatas[idxFaceAdj].empty()); if (bInvisibleFace || bInvisibleFaceAdj) { if (bInvisibleFace != bInvisibleFaceAdj) seamEdges.emplace_back(idxFace, idxFaceAdj); continue; } boost::add_edge(idxFace, idxFaceAdj, graph); } } faceFaces.Release(); ASSERT((Mesh::FIndex)boost::num_vertices(graph) == faces.size()); // start patch creation starting directly from individual faces if (!bUseVirtualFaces) { // assign the best view to each face labels.resize(faces.size()); { // normalize quality values float maxQuality(0); for (const FaceDataArr& faceDatas: facesDatas) { for (const FaceData& faceData: faceDatas) if (maxQuality < faceData.quality) maxQuality = faceData.quality; } Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); for (const FaceDataArr& faceDatas: facesDatas) { for (const FaceData& faceData: faceDatas) hist.Add(faceData.quality); } const float normQuality(hist.GetApproximatePermille(0.95f)); #if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP // initialize inference structures const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); LBPInference inference; { inference.SetNumNodes(faces.size()); inference.SetSmoothCost(SmoothnessPotts); EdgeOutIter ei, eie; FOREACH(f, faces) { for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { ASSERT(f == (FIndex)ei->m_source); const FIndex fAdj((FIndex)ei->m_target); if (f < fAdj) // add edges only once inference.SetNeighbors(f, fAdj); } // set 
costs for label 0 (undefined) inference.SetDataCost((Label)0, f, MaxEnergy); } } FOREACH(f, facesDatas) { const FaceDataArr& faceDatas = facesDatas[f]; // 跳过无效面片(bInvalidFacesRelative=true) bool hasValidView = false; for (const FaceData& fd : faceDatas) { if (!fd.bInvalidFacesRelative) { hasValidView = true; break; } } if (!hasValidView) { continue; } for (const FaceData& faceData: faceDatas) { // 跳过无效视图 if (faceData.bInvalidFacesRelative) continue; const Label label((Label)faceData.idxView+1); const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); const float dataCost((1.f-normalizedQuality)*MaxEnergy); inference.SetDataCost(label, f, dataCost); } } // assign the optimal view (label) to each face inference.Optimize(); // extract resulting labeling labels.Memset(0xFF); FOREACH(l, labels) { const Label label(inference.GetLabel(l)); ASSERT(label < images.size()+1); if (label > 0) labels[l] = label-1; } #endif } } // create texture patches { // divide graph in sub-graphs of connected faces having the same label EdgeIter ei, eie; const PairIdxArr::IDX startLabelSeamEdges(seamEdges.size()); for (boost::tie(ei, eie) = boost::edges(graph); ei != eie; ++ei) { const FIndex fSource((FIndex)ei->m_source); const FIndex fTarget((FIndex)ei->m_target); ASSERT(components.empty() || components[fSource] == components[fTarget]); if (labels[fSource] != labels[fTarget]) seamEdges.emplace_back(fSource, fTarget); } for (const PairIdx *pEdge=seamEdges.Begin()+startLabelSeamEdges, *pEdgeEnd=seamEdges.End(); pEdge!=pEdgeEnd; ++pEdge) boost::remove_edge(pEdge->i, pEdge->j, graph); // find connected components: texture patches ASSERT((Mesh::FIndex)boost::num_vertices(graph) == faces.size()); components.resize(faces.size()); const FIndex nComponents(boost::connected_components(graph, components.data())); // create texture patches; // last texture patch contains all faces with no texture LabelArr sizes(nComponents); sizes.Memset(0); FOREACH(c, 
components) ++sizes[components[c]]; texturePatches.resize(nComponents+1); texturePatches.back().label = NO_ID; FOREACH(f, faces) { const Label label(labels[f]); const FIndex c(components[f]); TexturePatch& texturePatch = texturePatches[c]; ASSERT(texturePatch.label == label || texturePatch.faces.empty()); if (label == NO_ID) { texturePatch.label = NO_ID; texturePatches.back().faces.Insert(f); } else { if (texturePatch.faces.empty()) { texturePatch.label = label; texturePatch.faces.reserve(sizes[c]); } texturePatch.faces.Insert(f); } } // remove all patches with invalid label (except the last one) // and create the map from the old index to the new one mapIdxPatch.resize(nComponents); std::iota(mapIdxPatch.Begin(), mapIdxPatch.End(), 0); for (FIndex t = nComponents; t-- > 0; ) { if (texturePatches[t].label == NO_ID) { texturePatches.RemoveAtMove(t); mapIdxPatch.RemoveAtMove(t); } } const unsigned numPatches(texturePatches.size()-1); uint32_t idxPatch(0); for (IndexArr::IDX i=0; i> compFaces(mapIdxPatch.GetSize()); for (FIndex faceIdx = 0; faceIdx < components.size(); ++faceIdx) { uint32_t compID = components[faceIdx]; if (compID != NO_ID && compID < compFaces.size()) { compFaces[compID].push_back(faceIdx); } } // 检查每个组件是否有对应的纹理块 for (uint32_t compID = 0; compID < mapIdxPatch.GetSize(); ++compID) { uint32_t patchIdx = mapIdxPatch[compID]; if (patchIdx == NO_ID) { // 这个组件没有对应的纹理块 if (!compFaces[compID].empty()) { DEBUG_EXTRA("Component %u has %zu faces but no patch, reassigning...", compID, compFaces[compID].size()); // 找到最近的纹理块 uint32_t nearestPatchIdx = FindNearestPatchForFaces(compFaces[compID]); if (nearestPatchIdx != NO_ID && nearestPatchIdx < texturePatches.size()) { mapIdxPatch[compID] = nearestPatchIdx; DEBUG_EXTRA(" Reassigned to patch %u", nearestPatchIdx); } else { DEBUG_EXTRA(" Could not find suitable patch, setting to invalid patch"); mapIdxPatch[compID] = static_cast(texturePatches.size() - 1); } } } else if (patchIdx >= texturePatches.size()) { // 
纹理块索引越界 DEBUG_EXTRA("Component %u maps to invalid patch %u, fixing...", compID, patchIdx); mapIdxPatch[compID] = NO_ID; if (!compFaces[compID].empty()) { uint32_t nearestPatchIdx = FindNearestPatchForFaces(compFaces[compID]); if (nearestPatchIdx != NO_ID && nearestPatchIdx < texturePatches.size()) { mapIdxPatch[compID] = nearestPatchIdx; DEBUG_EXTRA(" Fixed: reassigned to patch %u", nearestPatchIdx); } else { DEBUG_EXTRA(" Could not find suitable patch, setting to invalid patch"); mapIdxPatch[compID] = static_cast(texturePatches.size() - 1); } } } } } uint32_t MeshTexture::FindNearestPatchForFaces(const std::vector& faceIndices) { if (faceIndices.empty() || texturePatches.empty()) { return NO_ID; } // 收集所有相邻的面 std::unordered_set neighborPatches; for (FIndex fid : faceIndices) { if (fid >= faces.GetSize()) continue; const Face& face = faces[fid]; // 通过顶点查找相邻面 for (int i = 0; i < 3; ++i) { VIndex vertexIdx = face[i]; if (vertexIdx >= scene.mesh.vertexFaces.size()) continue; const Mesh::FaceIdxArr& adjacentFaces = scene.mesh.vertexFaces[vertexIdx]; for (FIndex adjFace : adjacentFaces) { if (adjFace < components.size() && components[adjFace] != NO_ID) { uint32_t compID = components[adjFace]; if (compID < mapIdxPatch.GetSize()) { uint32_t patchIdx = mapIdxPatch[compID]; if (patchIdx != NO_ID && patchIdx < texturePatches.size()) { neighborPatches.insert(patchIdx); } } } } } } // 找到拥有最多相邻面的纹理块 std::unordered_map patchAdjCount; uint32_t bestPatch = NO_ID; int maxCount = 0; for (uint32_t patchIdx : neighborPatches) { if (patchIdx < texturePatches.size()) { int count = 0; // 计算与这个面集中面的相邻面数量 for (FIndex fid : faceIndices) { if (fid >= faces.GetSize()) continue; const Face& face = faces[fid]; for (int i = 0; i < 3; ++i) { VIndex vertexIdx = face[i]; if (vertexIdx >= scene.mesh.vertexFaces.size()) continue; const Mesh::FaceIdxArr& adjacentFaces = scene.mesh.vertexFaces[vertexIdx]; for (FIndex adjFace : adjacentFaces) { if (adjFace < components.size() && components[adjFace] != 
NO_ID) { uint32_t compID = components[adjFace]; if (compID < mapIdxPatch.GetSize() && mapIdxPatch[compID] == patchIdx) { count++; } } } } } if (count > maxCount) { maxCount = count; bestPatch = patchIdx; } } } return bestPatch; } void MeshTexture::CreateSeamVertices() { DEBUG_EXTRA("Creating seam vertices with enhanced validation"); // 确保有纹理块 if (texturePatches.size() < 2) { DEBUG_EXTRA("Too few texture patches (%zu), skipping seam vertices creation", texturePatches.size()); seamVertices.Release(); return; } VIndex vs[2]; uint32_t vs0[2], vs1[2]; std::unordered_map mapVertexSeam; // 计算有效纹理块数量(排除无效纹理块) const unsigned numPatches = static_cast(texturePatches.size() - 1); DEBUG_EXTRA("Total patches: %zu, valid patches: %u", texturePatches.size(), numPatches); // 验证组件和映射 if (components.size() != faces.GetSize()) { DEBUG_EXTRA("ERROR: components size mismatch: %zu vs %u", components.size(), faces.GetSize()); return; } if (mapIdxPatch.GetSize() == 0) { DEBUG_EXTRA("ERROR: mapIdxPatch is empty"); return; } seamVertices.Release(); int validEdges = 0; int invalidEdges = 0; int skippedEdges = 0; for (uint32_t edgeIdx = 0; edgeIdx < seamEdges.GetSize(); ++edgeIdx) { const PairIdx& edge = seamEdges[edgeIdx]; // 检查面索引 if (edge.i >= faces.GetSize() || edge.j >= faces.GetSize()) { DEBUG_EXTRA("WARNING: Invalid face indices in seam edge %u: (%u, %u)", edgeIdx, edge.i, edge.j); invalidEdges++; continue; } // 检查组件ID if (edge.i >= components.size() || edge.j >= components.size()) { skippedEdges++; continue; } const uint32_t comp0 = components[edge.i]; const uint32_t comp1 = components[edge.j]; if (comp0 == NO_ID || comp1 == NO_ID) { skippedEdges++; continue; } // 检查组件ID是否有效 if (comp0 >= mapIdxPatch.GetSize() || comp1 >= mapIdxPatch.GetSize()) { DEBUG_EXTRA("WARNING: Component IDs out of range at edge %u: comp0=%u, comp1=%u", edgeIdx, comp0, comp1); invalidEdges++; continue; } const uint32_t idxPatch0 = mapIdxPatch[comp0]; const uint32_t idxPatch1 = mapIdxPatch[comp1]; // 检查纹理块索引是否有效 
if (idxPatch0 >= texturePatches.size() || idxPatch1 >= texturePatches.size()) { DEBUG_EXTRA("WARNING: Invalid patch indices at edge %u: idxPatch0=%u, idxPatch1=%u, total patches=%zu", edgeIdx, idxPatch0, idxPatch1, texturePatches.size()); invalidEdges++; continue; } // 检查是否属于同一个纹理块 if (idxPatch0 == idxPatch1) { // 属于同一个纹理块,不是接缝 skippedEdges++; continue; } // 跳过无效纹理块 if (idxPatch0 == texturePatches.size() - 1 || idxPatch1 == texturePatches.size() - 1) { // 至少有一个面在无效纹理块中 skippedEdges++; continue; } // 获取边的顶点 - 直接调用,不检查返回值 scene.mesh.GetEdgeVertices(edge.i, edge.j, vs0, vs1); const Face& faceI = faces[edge.i]; const Face& faceJ = faces[edge.j]; if (vs0[0] >= 3 || vs0[1] >= 3 || vs1[0] >= 3 || vs1[1] >= 3 || faceI[vs0[0]] != faceJ[vs1[0]] || faceI[vs0[1]] != faceJ[vs1[1]]) { DEBUG_EXTRA("WARNING: Edge vertices mismatch at edge %u", edgeIdx); invalidEdges++; continue; } vs[0] = faceI[vs0[0]]; vs[1] = faceI[vs0[1]]; if (vs[0] >= vertices.size() || vs[1] >= vertices.size()) { DEBUG_EXTRA("WARNING: Invalid vertex indices at edge %u: %u, %u", edgeIdx, vs[0], vs[1]); invalidEdges++; continue; } // 创建或获取接缝顶点 auto itSeamVertex0 = mapVertexSeam.emplace(std::make_pair(vs[0], static_cast(seamVertices.GetSize()))); if (itSeamVertex0.second) { seamVertices.emplace_back(vs[0]); } SeamVertex& seamVertex0 = seamVertices[itSeamVertex0.first->second]; auto itSeamVertex1 = mapVertexSeam.emplace(std::make_pair(vs[1], static_cast(seamVertices.GetSize()))); if (itSeamVertex1.second) { seamVertices.emplace_back(vs[1]); } SeamVertex& seamVertex1 = seamVertices[itSeamVertex1.first->second]; // 为纹理块0添加边 { const TexCoord offset0(texturePatches[idxPatch0].rect.tl()); uint32_t texCoordIdx0 = edge.i * 3 + vs0[0]; uint32_t texCoordIdx1 = edge.i * 3 + vs0[1]; if (texCoordIdx0 < faceTexcoords.GetSize() && texCoordIdx1 < faceTexcoords.GetSize()) { SeamVertex::Patch& patch00 = seamVertex0.GetPatch(idxPatch0); SeamVertex::Patch& patch10 = seamVertex1.GetPatch(idxPatch0); if 
(patch00.edges.Find(itSeamVertex1.first->second) == NO_ID) { patch00.edges.emplace_back(itSeamVertex1.first->second).idxFace = edge.i; patch00.proj = faceTexcoords[texCoordIdx0] + offset0; } if (patch10.edges.Find(itSeamVertex0.first->second) == NO_ID) { patch10.edges.emplace_back(itSeamVertex0.first->second).idxFace = edge.i; patch10.proj = faceTexcoords[texCoordIdx1] + offset0; } } } // 为纹理块1添加边 { const TexCoord offset1(texturePatches[idxPatch1].rect.tl()); uint32_t texCoordIdx0 = edge.j * 3 + vs1[0]; uint32_t texCoordIdx1 = edge.j * 3 + vs1[1]; if (texCoordIdx0 < faceTexcoords.GetSize() && texCoordIdx1 < faceTexcoords.GetSize()) { SeamVertex::Patch& patch01 = seamVertex0.GetPatch(idxPatch1); SeamVertex::Patch& patch11 = seamVertex1.GetPatch(idxPatch1); if (patch01.edges.Find(itSeamVertex1.first->second) == NO_ID) { patch01.edges.emplace_back(itSeamVertex1.first->second).idxFace = edge.j; patch01.proj = faceTexcoords[texCoordIdx0] + offset1; } if (patch11.edges.Find(itSeamVertex0.first->second) == NO_ID) { patch11.edges.emplace_back(itSeamVertex0.first->second).idxFace = edge.j; patch11.proj = faceTexcoords[texCoordIdx1] + offset1; } } } validEdges++; } seamEdges.Release(); DEBUG_EXTRA("Seam vertices created: %u vertices, %d valid edges, %d invalid edges, %d skipped edges", seamVertices.GetSize(), validEdges, invalidEdges, skippedEdges); } // Native void MeshTexture::GlobalSeamLeveling3() { ASSERT(!seamVertices.empty()); const unsigned numPatches(texturePatches.size()-1); // Create a boolean array to mark invalid vertices BoolArr vertexInvalid(vertices.size()); vertexInvalid.Memset(false); FOREACH(f, faces) { if (labelsInvalid[f] != NO_ID) { const Face& face = faces[f]; for (int v=0; v<3; ++v) vertexInvalid[face[v]] = true; } } // find the patch ID for each vertex PatchIndices patchIndices(vertices.size()); patchIndices.Memset(0); FOREACH(f, faces) { // if (labelsInvalid[f] != NO_ID) // continue; const uint32_t idxPatch(mapIdxPatch[components[f]]); const Face& 
face = faces[f]; for (int v=0; v<3; ++v) patchIndices[face[v]].idxPatch = idxPatch; } FOREACH(i, seamVertices) { const SeamVertex& seamVertex = seamVertices[i]; ASSERT(!seamVertex.patches.empty()); PatchIndex& patchIndex = patchIndices[seamVertex.idxVertex]; patchIndex.bIndex = true; patchIndex.idxSeamVertex = i; } // assign a row index within the solution vector x to each vertex/patch ASSERT(vertices.size() < static_cast(std::numeric_limits::max())); MatIdx rowsX(0); typedef std::unordered_map VertexPatch2RowMap; cList vertpatch2rows(vertices.size()); FOREACH(i, vertices) { const PatchIndex& patchIndex = patchIndices[i]; VertexPatch2RowMap& vertpatch2row = vertpatch2rows[i]; if (patchIndex.bIndex) { // vertex is part of multiple patches const SeamVertex& seamVertex = seamVertices[patchIndex.idxSeamVertex]; ASSERT(seamVertex.idxVertex == i); for (const SeamVertex::Patch& patch: seamVertex.patches) { ASSERT(patch.idxPatch != numPatches); vertpatch2row[patch.idxPatch] = rowsX++; } } else if (patchIndex.idxPatch < numPatches) { // vertex is part of only one patch vertpatch2row[patchIndex.idxPatch] = rowsX++; } } // fill Tikhonov's Gamma matrix (regularization constraints) const float lambda(0.1f); MatIdx rowsGamma(0); Mesh::VertexIdxArr adjVerts; CLISTDEF0(MatEntry) rows(0, vertices.size()*4); FOREACH(v, vertices) { adjVerts.Empty(); scene.mesh.GetAdjVertices(v, adjVerts); VertexPatchIterator itV(patchIndices[v], seamVertices); while (itV.Next()) { const uint32_t idxPatch(itV); if (idxPatch == numPatches) continue; const MatIdx col(vertpatch2rows[v].at(idxPatch)); for (const VIndex vAdj: adjVerts) { if (v >= vAdj) continue; VertexPatchIterator itVAdj(patchIndices[vAdj], seamVertices); while (itVAdj.Next()) { const uint32_t idxPatchAdj(itVAdj); if (idxPatch == idxPatchAdj) { const MatIdx colAdj(vertpatch2rows[vAdj].at(idxPatchAdj)); float currentLambda = (vertexInvalid[v] || vertexInvalid[vAdj]) ? 
0.01f : 0.1f; // float currentLambda = 1.0f; rows.emplace_back(rowsGamma, col, currentLambda); rows.emplace_back(rowsGamma, colAdj, -currentLambda); ++rowsGamma; } } } } } ASSERT(rows.size()/2 < static_cast(std::numeric_limits::max())); SparseMat Gamma(rowsGamma, rowsX); Gamma.setFromTriplets(rows.Begin(), rows.End()); rows.Empty(); // fill the matrix A and the coefficients for the Vector b of the linear equation system IndexArr indices; Colors vertexColors; Colors coeffB; for (const SeamVertex& seamVertex: seamVertices) { if (seamVertex.patches.size() < 2) continue; seamVertex.SortByPatchIndex(indices); vertexColors.resize(indices.size()); FOREACH(i, indices) { const SeamVertex::Patch& patch0 = seamVertex.patches[indices[i]]; ASSERT(patch0.idxPatch < numPatches); SampleImage sampler(images[texturePatches[patch0.idxPatch].label].image); for (const SeamVertex::Patch::Edge& edge: patch0.edges) { const SeamVertex& seamVertex1 = seamVertices[edge.idxSeamVertex]; const SeamVertex::Patches::IDX idxPatch1(seamVertex1.patches.Find(patch0.idxPatch)); ASSERT(idxPatch1 != SeamVertex::Patches::NO_INDEX); const SeamVertex::Patch& patch1 = seamVertex1.patches[idxPatch1]; sampler.AddEdge(patch0.proj, patch1.proj); } vertexColors[i] = sampler.GetColor(); } const VertexPatch2RowMap& vertpatch2row = vertpatch2rows[seamVertex.idxVertex]; for (IDX i=0; i(std::numeric_limits::max())); const MatIdx rowsA((MatIdx)coeffB.size()); SparseMat A(rowsA, rowsX); A.setFromTriplets(rows.Begin(), rows.End()); rows.Release(); SparseMat Lhs(A.transpose() * A + Gamma.transpose() * Gamma); // CG uses only the lower triangle, so prune the rest and compress matrix Lhs.prune([](const int& row, const int& col, const float&) -> bool { return col <= row; }); // globally solve for the correction colors Eigen::Matrix colorAdjustments(rowsX, 3); { // init CG solver Eigen::ConjugateGradient solver; solver.setMaxIterations(1000); solver.setTolerance(0.0001f); solver.compute(Lhs); ASSERT(solver.info() == 
Eigen::Success); #ifdef TEXOPT_USE_OPENMP #pragma omp parallel for #endif for (int channel=0; channel<3; ++channel) { // init right hand side vector const Eigen::Map< Eigen::VectorXf, Eigen::Unaligned, Eigen::Stride<0,3> > b(coeffB.front().ptr()+channel, rowsA); const Eigen::VectorXf Rhs(SparseMat(A.transpose()) * b); // solve for x const Eigen::VectorXf x(solver.solve(Rhs)); ASSERT(solver.info() == Eigen::Success); // subtract mean since the system is under-constrained and // we need the solution with minimal adjustments Eigen::Map< Eigen::VectorXf, Eigen::Unaligned, Eigen::Stride<0,3> >(colorAdjustments.data()+channel, rowsX) = x.array() - x.mean(); DEBUG_LEVEL(3, "\tcolor channel %d: %d iterations, %g residual", channel, solver.iterations(), solver.error()); } } // adjust texture patches using the correction colors #ifdef TEXOPT_USE_OPENMP #pragma omp parallel for schedule(dynamic) for (int i=0; i<(int)numPatches; ++i) { #else for (unsigned i=0; i(imageAdj, Color::ZERO); // apply color correction to the patch image cv::Mat image(images[texturePatch.label].image(texturePatch.rect)); for (int r=0; r(r,c); const Color col(RGB2YCBCR(Color(v))); const Color acol(YCBCR2RGB(Color(col+a))); for (int p=0; p<3; ++p) v[p] = (uint8_t)CLAMP(ROUND2INT(acol[p]), 0, 255); } } } } // set to one in order to dilate also on the diagonal of the border // (normally not needed) #define DILATE_EXTRA 0 void MeshTexture::ProcessMask(Image8U& mask, int stripWidth) { typedef Image8U::Type Type; // dilate and erode around the border, // in order to fill all gaps and remove outside pixels // (due to imperfect overlay of the raster line border and raster faces) #define DILATEDIR(rd,cd) { \ Type& vi = mask(r+(rd),c+(cd)); \ if (vi != border) \ vi = interior; \ } const int HalfSize(1); const int RowsEnd(mask.rows-HalfSize); const int ColsEnd(mask.cols-HalfSize); /* float depthThreshold = 0.1f; for (int r=0; r depthThreshold) { mask(r,c) = empty; } } } //*/ for (int r=HalfSize; r PixelSet; 
PixelSet borderPixels; for (int y=0; y PixelVector; for (int s=0; sx+i), yn(it->y+j); if (ISINSIDE(xn, 0, width) && ISINSIDE(yn, 0, height) && !ISEMPTY(orgMask, xn, yn)) borderPixels.insert(ImageRef(xn,yn)); } } } } #undef ISEMPTY // mark all remaining pixels empty in the mask for (int y=0; y indices(dst.size()); indices.memset(0xff); MatIdx nnz(0); for (int i = 0; i < n; ++i) if (mask(i) != empty) indices(i) = nnz++; if (nnz <= 0) return; Colors coeffB(nnz); CLISTDEF0(MatEntry) coeffA(0, nnz); for (int i = 0; i < n; ++i) { switch (mask(i)) { case border: { const MatIdx idx(indices(i)); ASSERT(idx != -1); coeffA.emplace_back(idx, idx, 1.f); coeffB[idx] = (const Color&)dst(i); } break; case interior: { const MatIdx idxUp(indices(i - width)); const MatIdx idxLeft(indices(i - 1)); const MatIdx idxCenter(indices(i)); const MatIdx idxRight(indices(i + 1)); const MatIdx idxDown(indices(i + width)); // all indices should be either border conditions or part of the optimization ASSERT(idxUp != -1 && idxLeft != -1 && idxCenter != -1 && idxRight != -1 && idxDown != -1); coeffA.emplace_back(idxCenter, idxUp, 1.f); coeffA.emplace_back(idxCenter, idxLeft, 1.f); coeffA.emplace_back(idxCenter, idxCenter,-4.f); coeffA.emplace_back(idxCenter, idxRight, 1.f); coeffA.emplace_back(idxCenter, idxDown, 1.f); // set target coefficient coeffB[idxCenter] = (bias == 1.f ? 
ColorLaplacian(src,i) : ColorLaplacian(src,i)*bias + ColorLaplacian(dst,i)*(1.f-bias)); } break; } } SparseMat A(nnz, nnz); A.setFromTriplets(coeffA.Begin(), coeffA.End()); coeffA.Release(); #ifdef TEXOPT_SOLVER_SPARSELU // use SparseLU factorization // (faster, but not working if EIGEN_DEFAULT_TO_ROW_MAJOR is defined, bug inside Eigen) const Eigen::SparseLU< SparseMat, Eigen::COLAMDOrdering > solver(A); #else // use BiCGSTAB solver const Eigen::BiCGSTAB< SparseMat, Eigen::IncompleteLUT > solver(A); #endif ASSERT(solver.info() == Eigen::Success); for (int channel=0; channel<3; ++channel) { const Eigen::Map< Eigen::VectorXf, Eigen::Unaligned, Eigen::Stride<0,3> > b(coeffB.front().ptr()+channel, nnz); const Eigen::VectorXf x(solver.solve(b)); ASSERT(solver.info() == Eigen::Success); for (int i = 0; i < n; ++i) { const MatIdx index(indices(i)); if (index != -1) dst(i)[channel] = x[index]; } } } // Native void MeshTexture::LocalSeamLeveling3() { ASSERT(!seamVertices.empty()); const unsigned numPatches(texturePatches.size()-1); // adjust texture patches locally, so that the border continues smoothly inside the patch #ifdef TEXOPT_USE_OPENMP #pragma omp parallel for schedule(dynamic) for (int i=0; i<(int)numPatches; ++i) { #else for (unsigned i=0; i(sampler, samplePos0)); const TexCoord samplePos1(p1 + p1Dir * l); const Color color1(image1.sample(sampler, samplePos1)/255.f); image(pt) = Color((color0 + color1) * 0.5f); // set mask edge also mask(pt) = border; } } data(image, mask, imageOrg, image1, p0, p0Adj, p1, p1Adj); Image32F3::DrawLine(p0, p0Adj, data); // skip remaining patches, // as a manifold edge is shared by maximum two face (one in each patch), which we found already break; } } // render the vertex at the patch border meeting neighbor patches AccumColor accumColor; // for each patch... 
for (const SeamVertex::Patch& patch: seamVertex0.patches) { // add its view to the vertex mean color const Image8U3& img(images[texturePatches[patch.idxPatch].label].image); accumColor.Add(img.sample(sampler, patch.proj)/255.f, 1.f); } const ImageRef pt(ROUND2INT(patch0.proj-offset)); image(pt) = accumColor.Normalized(); mask(pt) = border; } // make sure the border is continuous and // keep only the exterior tripe of the given size #ifdef USE_CUDA if (MeshTextureCUDA::ProcessMaskCUDA(mask, 20)) { // printf("Success ProcessMaskCUDA!\n"); // 成功使用CUDA加速 } else { // 回退到CPU版本 // printf("Failed ProcessMaskCUDA!\n"); ProcessMask(mask, 20); } #else ProcessMask(mask, 20); #endif // compute texture patch blending #ifdef USE_CUDA if (MeshTextureCUDA::PoissonBlendCUDA(image, imageOrg, mask, 1.0f)) { // printf("Success PoissonBlendCUDA!"); // 成功使用CUDA加速 } else { // 回退到CPU版本 // printf("Failed PoissonBlendCUDA!"); PoissonBlending(imageOrg, image, mask, 1.0f); } #else PoissonBlending(imageOrg, image, mask); #endif // apply color correction to the patch image cv::Mat imagePatch(image0(texturePatch.rect)); #ifdef TEXOPT_USE_OPENMP #pragma omp parallel for collapse(2) #endif for (int r=0; r(r,c); for (int p=0; p<3; ++p) v[p] = (uint8_t)CLAMP(ROUND2INT(a[p]*255.f), 0, 255); } } } } void MeshTexture::GenerateTexture(bool bGlobalSeamLeveling, bool bLocalSeamLeveling, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, float fSharpnessWeight, int maxTextureSize, const SEACAVE::String& basename, bool bOriginFaceview, Scene *pScene) { bool bUseExternalUV = false; if (!pScene->mesh.faceTexcoords.empty() && pScene->mesh.faceTexcoords.size() == pScene->mesh.faces.size() * 3) { bUseExternalUV = true; DEBUG_EXTRA("使用外部UV数据,跳过自动UV生成流程"); } DEBUG_EXTRA("GenerateTexture bUseExternalUV=%b", bUseExternalUV); // Bruce // bGlobalSeamLeveling = false; // bLocalSeamLeveling = false; // project patches in the corresponding view and compute texture-coordinates and bounding-box 
// Bruce int border = 2; if (!bOriginFaceview) border = 4; // ===== 修改:只在非外部UV模式下初始化faceTexcoords ===== if (!bUseExternalUV) { faceTexcoords.resize(faces.size()*3); faceTexindices.resize(faces.size()); } #ifdef TEXOPT_USE_OPENMP // LOG_OUT() << "def TEXOPT_USE_OPENMP" << std::endl; const unsigned numPatches(texturePatches.size()-1); // ===== 修改:只在非外部UV模式下执行投影计算 ===== if (!bUseExternalUV) { #pragma omp parallel for schedule(dynamic) for (int_t idx=0; idx<(int_t)numPatches; ++idx) { TexturePatch& texturePatch = texturePatches[(uint32_t)idx]; #else for (TexturePatch *pTexturePatch=texturePatches.Begin(), *pTexturePatchEnd=texturePatches.End()-1; pTexturePatch 2 && (bGlobalSeamLeveling || bLocalSeamLeveling)) { // create seam vertices and edges CreateSeamVertices(); // perform global seam leveling if (bGlobalSeamLeveling) { TD_TIMER_STARTD(); if (bUseExternalUV) { // 外部UV数据可能需要更温和的接缝处理 GlobalSeamLevelingExternalUV(); } else { GlobalSeamLeveling3(); } DEBUG_EXTRA("\tglobal seam leveling completed (%s)", TD_TIMER_GET_FMT().c_str()); } // perform local seam leveling if (bLocalSeamLeveling) { TD_TIMER_STARTD(); // LocalSeamLeveling(); if (bUseExternalUV) { // 外部UV数据可能需要不同的局部接缝处理 LocalSeamLevelingExternalUV(); } else { LocalSeamLeveling3(); } DEBUG_EXTRA("\tlocal seam leveling completed (%s)", TD_TIMER_GET_FMT().c_str()); } } DEBUG_EXTRA("seam (%s)", TD_TIMER_GET_FMT().c_str()); // ===== 修改:纹理块合并逻辑调整 ===== // 外部UV数据可能需要跳过纹理块合并,因为UV已经固定 if (!bUseExternalUV) { // merge texture patches with overlapping rectangles for (unsigned i=0; i 0 && (texturePatches[i].rect.width > maxTextureSize || texturePatches[i].rect.height > maxTextureSize)) { DEBUG("error: a patch of size %u x %u does not fit the texture", texturePatches[i].rect.width, texturePatches[i].rect.height); ABORT("the maximum texture size chosen cannot fit a patch"); } unplacedRects[i] = {texturePatches[i].rect, i}; } LOG_OUT() << "unplacedRects loop completed" << std::endl; LOG_OUT() << "pack patches: one pack per 
texture file loop completed" << std::endl; // pack patches: one pack per texture file CLISTDEF2IDX(RectsBinPack::RectWIdxArr, TexIndex) placedRects; { // increase texture size till all patches fit // Bruce unsigned typeRectsBinPack(nRectPackingHeuristic/100); unsigned typeSplit((nRectPackingHeuristic-typeRectsBinPack*100)/10); unsigned typeHeuristic(nRectPackingHeuristic%10); if (!bOriginFaceview && false) { typeRectsBinPack = 1; typeSplit = 0; typeHeuristic = 1; } int textureSize = 0; if (bUseExternalUV) { // 对于外部UV数据,直接使用现有的纹理坐标,跳过复杂的打包流程 DEBUG_EXTRA("使用外部UV数据,简化纹理图集生成流程"); // 创建单个纹理图集 textureSize = maxTextureSize > 0 ? maxTextureSize : 4096; // 默认大小 texturesDiffuse.emplace_back(textureSize, textureSize).setTo(cv::Scalar(colEmpty.b, colEmpty.g, colEmpty.r)); // 直接使用现有的faceTexcoords和faceTexindices // 不需要重新计算纹理坐标偏移 DEBUG_EXTRA("外部UV纹理映射完成: 使用现有UV坐标"); } else { while (!unplacedRects.empty()) { TD_TIMER_STARTD(); if (textureSize == 0) { textureSize = RectsBinPack::ComputeTextureSize(unplacedRects, nTextureSizeMultiple); if (maxTextureSize > 0 && textureSize > maxTextureSize) textureSize = maxTextureSize; } RectsBinPack::RectWIdxArr newPlacedRects; switch (typeRectsBinPack) { case 0: { MaxRectsBinPack pack(textureSize, textureSize); newPlacedRects = pack.Insert(unplacedRects, (MaxRectsBinPack::FreeRectChoiceHeuristic)typeHeuristic); break; } case 1: { SkylineBinPack pack(textureSize, textureSize, typeSplit!=0); newPlacedRects = pack.Insert(unplacedRects, (SkylineBinPack::LevelChoiceHeuristic)typeHeuristic); break; } case 2: { GuillotineBinPack pack(textureSize, textureSize); newPlacedRects = pack.Insert(unplacedRects, false, (GuillotineBinPack::FreeRectChoiceHeuristic)typeHeuristic, (GuillotineBinPack::GuillotineSplitHeuristic)typeSplit); break; } default: ABORT("error: unknown RectsBinPack type"); } DEBUG_ULTIMATE("\tpacking texture completed: %u initial patches, %u placed patches, %u texture-size, %u textures (%s)", texturePatches.size(), newPlacedRects.size(), 
textureSize, placedRects.size(), TD_TIMER_GET_FMT().c_str()); if (textureSize == maxTextureSize || unplacedRects.empty()) { // create texture image placedRects.emplace_back(std::move(newPlacedRects)); // Pixel8U colEmpty2=Pixel8U(0,0,255); texturesDiffuse.emplace_back(textureSize, textureSize).setTo(cv::Scalar(colEmpty.b, colEmpty.g, colEmpty.r)); textureSize = 0; } else { // try again with a bigger texture textureSize *= 2; if (maxTextureSize > 0) textureSize = std::max(textureSize, maxTextureSize); unplacedRects.JoinRemove(newPlacedRects); } } } } LOG_OUT() << "Third loop completed" << std::endl; Mesh::FaceIdxArr emptyFaceIndexes; if (!bUseExternalUV) { #ifdef TEXOPT_USE_OPENMP #pragma omp parallel for schedule(dynamic) for (int_t i=0; i<(int_t)placedRects.size(); ++i) { for (int_t j=0; j<(int_t)placedRects[(TexIndex)i].size(); ++j) { const TexIndex idxTexture((TexIndex)i); const uint32_t idxPlacedPatch((uint32_t)j); #else FOREACH(idxTexture, placedRects) { FOREACH(idxPlacedPatch, placedRects[idxTexture]) { #endif const TexturePatch& texturePatch = texturePatches[placedRects[idxTexture][idxPlacedPatch].patchIdx]; const RectsBinPack::Rect& rect = placedRects[idxTexture][idxPlacedPatch].rect; // copy patch image ASSERT((rect.width == texturePatch.rect.width && rect.height == texturePatch.rect.height) || (rect.height == texturePatch.rect.width && rect.width == texturePatch.rect.height)); int x(0), y(1); if (texturePatch.label != NO_ID) { const Image& imageData = images[texturePatch.label]; cv::Mat patch(imageData.image(texturePatch.rect)); if (rect.width != texturePatch.rect.width) { // flip patch and texture-coordinates patch = patch.t(); x = 1; y = 0; } patch.copyTo(texturesDiffuse[idxTexture](rect)); } else { //* auto it = texturePatch.faces.begin(); while (it != texturePatch.faces.end()) { emptyFaceIndexes.push_back(*it); ++it; } //*/ /* // 处理无效贴片:使用备用纹理 if (alternativeTexture != nullptr) { // 使用备用纹理进行采样 cv::Mat patch(rect.size(), CV_8UC3); for (int r = 0; r < 
patch.rows; ++r) { for (int c = 0; c < patch.cols; ++c) { // 计算UV坐标:将像素位置映射到备用纹理的UV空间 float u = (float)c / patch.cols; float v = (float)r / patch.rows; // 从备用纹理中采样 int xSrc = static_cast(u * alternativeTexture->width()); int ySrc = static_cast(v * alternativeTexture->height()); xSrc = std::min(std::max(xSrc, 0), alternativeTexture->width() - 1); ySrc = std::min(std::max(ySrc, 0), alternativeTexture->height() - 1); Pixel8U color = (*alternativeTexture)(ySrc, xSrc); patch.at(r, c) = color; } } // Pixel8U colEmpty2=Pixel8U(0,0,255); // cv::Mat patch2(rect.size(), CV_8UC3, cv::Scalar(colEmpty2.b, colEmpty2.g, colEmpty2.r)); // patch2.copyTo(texturesDiffuse[idxTexture](rect)); patch.copyTo(texturesDiffuse[idxTexture](rect)); } else { // 没有备用纹理,使用默认颜色 // Pixel8U colEmpty2=Pixel8U(0,0,255); cv::Mat patch(rect.size(), CV_8UC3, cv::Scalar(colEmpty.b, colEmpty.g, colEmpty.r)); patch.copyTo(texturesDiffuse[idxTexture](rect)); } */ } // compute final texture coordinates const TexCoord offset(rect.tl()); for (const FIndex idxFace: texturePatch.faces) { TexCoord* texcoords = faceTexcoords.data()+idxFace*3; faceTexindices[idxFace] = idxTexture; for (int v=0; v<3; ++v) { TexCoord& texcoord = texcoords[v]; texcoord = TexCoord( texcoord[x]+offset.x, texcoord[y]+offset.y ); } } } } } else { // 外部UV数据:直接使用现有的纹理坐标,不需要重新计算 DEBUG_EXTRA("跳过自动颜色采样,使用外部UV坐标"); } if (texturesDiffuse.size() == 1) faceTexindices.Release(); /* // Lab颜色空间处理(解决过曝/过暗问题) if (bGlobalSeamLeveling) { for (auto& texture : texturesDiffuse) { // 安全检查:空图像、非3通道BGR格式 if (texture.empty() || texture.channels() != 3) { LOG_OUT() << "Skipping invalid texture: empty=" << texture.empty() << ", channels=" << texture.channels() << std::endl; continue; } // 创建临时Lab图像 cv::Mat labImage; cv::cvtColor(texture, labImage, cv::COLOR_BGR2Lab); // 分离通道 std::vector labChannels; cv::split(labImage, labChannels); // 使用CLAHE限制对比度增强 cv::Ptr clahe = cv::createCLAHE(); clahe->setClipLimit(2.0); clahe->apply(labChannels[0], labChannels[0]); // 
钳制Lab通道数值范围(防溢出) // 使用安全循环替代forEach,避免内存问题 for (int r = 0; r < labImage.rows; ++r) { for (int c = 0; c < labImage.cols; ++c) { cv::Vec3b& pixel = labImage.at(r, c); // 注意:这里使用Vec3b而非Vec3f pixel[0] = cv::saturate_cast(pixel[0]); // L ∈ [0,255] pixel[1] = cv::saturate_cast(pixel[1]); // a ∈ [0,255] pixel[2] = cv::saturate_cast(pixel[2]); // b ∈ [0,255] } } // 合并通道并转回BGR cv::merge(labChannels, labImage); cv::cvtColor(labImage, texture, cv::COLOR_Lab2BGR); } } */ // apply some sharpening if (fSharpnessWeight > 0) { constexpr double sigma = 1.5; for (auto &textureDiffuse: texturesDiffuse) { Image8U3 blurryTextureDiffuse; cv::GaussianBlur(textureDiffuse, blurryTextureDiffuse, cv::Size(), sigma); cv::addWeighted(textureDiffuse, 1+fSharpnessWeight, blurryTextureDiffuse, -fSharpnessWeight, 0, textureDiffuse); } } LOG_OUT() << "Fourth loop completed" << std::endl; std::ofstream out(basename + "_empty_color_triangles.txt"); RFOREACHPTR(pIdxF, emptyFaceIndexes) { out << *pIdxF << "\n"; } out.close(); } } void MeshTexture::GenerateTextureForUV(bool bGlobalSeamLeveling, bool bLocalSeamLeveling, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, float fSharpnessWeight, int maxTextureSize, const SEACAVE::String& basename, bool bOriginFaceview, Scene *pScene, Mesh::TexCoordArr& existingTexcoords, Mesh::TexIndexArr& existingTexindices) { int border = 2; if (!bOriginFaceview) border = 4; Mesh::TexCoordArr& faceTexcoords2 = existingTexcoords; Mesh::TexIndexArr& faceTexindices2 = existingTexindices; faceTexcoords2.resize(faces.size()*3); faceTexindices2.resize(faces.size()); #ifdef TEXOPT_USE_OPENMP // LOG_OUT() << "def TEXOPT_USE_OPENMP" << std::endl; const unsigned numPatches(texturePatches.size()-1); // ===== 修改:只在非外部UV模式下执行投影计算 ===== { #pragma omp parallel for schedule(dynamic) for (int_t idx=0; idx<(int_t)numPatches; ++idx) { TexturePatch& texturePatch = texturePatches[(uint32_t)idx]; #else for (TexturePatch *pTexturePatch=texturePatches.Begin(), 
*pTexturePatchEnd=texturePatches.End()-1; pTexturePatch 2 && (bGlobalSeamLeveling || bLocalSeamLeveling)) { // create seam vertices and edges CreateSeamVertices(); // perform global seam leveling if (bGlobalSeamLeveling) { TD_TIMER_STARTD(); if (bUseExternalUV) { // 外部UV数据可能需要更温和的接缝处理 GlobalSeamLevelingExternalUV(); } else { GlobalSeamLeveling3(); } DEBUG_EXTRA("\tglobal seam leveling completed (%s)", TD_TIMER_GET_FMT().c_str()); } // perform local seam leveling if (bLocalSeamLeveling) { TD_TIMER_STARTD(); // LocalSeamLeveling(); if (bUseExternalUV) { // 外部UV数据可能需要不同的局部接缝处理 LocalSeamLevelingExternalUV(); } else { LocalSeamLeveling3(); } DEBUG_EXTRA("\tlocal seam leveling completed (%s)", TD_TIMER_GET_FMT().c_str()); } } DEBUG_EXTRA("seam (%s)", TD_TIMER_GET_FMT().c_str()); */ { // merge texture patches with overlapping rectangles for (unsigned i=0; i 0 && (texturePatches[i].rect.width > maxTextureSize || texturePatches[i].rect.height > maxTextureSize)) { DEBUG("error: a patch of size %u x %u does not fit the texture", texturePatches[i].rect.width, texturePatches[i].rect.height); ABORT("the maximum texture size chosen cannot fit a patch"); } unplacedRects[i] = {texturePatches[i].rect, i}; } LOG_OUT() << "unplacedRects loop completed" << std::endl; LOG_OUT() << "pack patches: one pack per texture file loop completed" << std::endl; // pack patches: one pack per texture file CLISTDEF2IDX(RectsBinPack::RectWIdxArr, TexIndex) placedRects; { // increase texture size till all patches fit // Bruce unsigned typeRectsBinPack(nRectPackingHeuristic/100); unsigned typeSplit((nRectPackingHeuristic-typeRectsBinPack*100)/10); unsigned typeHeuristic(nRectPackingHeuristic%10); if (!bOriginFaceview && false) { typeRectsBinPack = 1; typeSplit = 0; typeHeuristic = 1; } int textureSize = 0; { while (!unplacedRects.empty()) { TD_TIMER_STARTD(); if (textureSize == 0) { textureSize = RectsBinPack::ComputeTextureSize(unplacedRects, nTextureSizeMultiple); if (maxTextureSize > 0 && textureSize 
> maxTextureSize) textureSize = maxTextureSize; } RectsBinPack::RectWIdxArr newPlacedRects; switch (typeRectsBinPack) { case 0: { MaxRectsBinPack pack(textureSize, textureSize); newPlacedRects = pack.Insert(unplacedRects, (MaxRectsBinPack::FreeRectChoiceHeuristic)typeHeuristic); break; } case 1: { SkylineBinPack pack(textureSize, textureSize, typeSplit!=0); newPlacedRects = pack.Insert(unplacedRects, (SkylineBinPack::LevelChoiceHeuristic)typeHeuristic); break; } case 2: { GuillotineBinPack pack(textureSize, textureSize); newPlacedRects = pack.Insert(unplacedRects, false, (GuillotineBinPack::FreeRectChoiceHeuristic)typeHeuristic, (GuillotineBinPack::GuillotineSplitHeuristic)typeSplit); break; } default: ABORT("error: unknown RectsBinPack type"); } DEBUG_ULTIMATE("\tpacking texture completed: %u initial patches, %u placed patches, %u texture-size, %u textures (%s)", texturePatches.size(), newPlacedRects.size(), textureSize, placedRects.size(), TD_TIMER_GET_FMT().c_str()); if (textureSize == maxTextureSize || unplacedRects.empty()) { // create texture image placedRects.emplace_back(std::move(newPlacedRects)); // Pixel8U colEmpty2=Pixel8U(0,0,255); texturesDiffuseTemp.emplace_back(textureSize, textureSize).setTo(cv::Scalar(colEmpty.b, colEmpty.g, colEmpty.r)); textureSize = 0; } else { // try again with a bigger texture textureSize *= 2; if (maxTextureSize > 0) textureSize = std::max(textureSize, maxTextureSize); unplacedRects.JoinRemove(newPlacedRects); } } } } LOG_OUT() << "Third loop completed" << std::endl; Mesh::FaceIdxArr emptyFaceIndexes; { #ifdef TEXOPT_USE_OPENMP #pragma omp parallel for schedule(dynamic) for (int_t i=0; i<(int_t)placedRects.size(); ++i) { for (int_t j=0; j<(int_t)placedRects[(TexIndex)i].size(); ++j) { const TexIndex idxTexture((TexIndex)i); const uint32_t idxPlacedPatch((uint32_t)j); #else FOREACH(idxTexture, placedRects) { FOREACH(idxPlacedPatch, placedRects[idxTexture]) { #endif const TexturePatch& texturePatch = 
texturePatches[placedRects[idxTexture][idxPlacedPatch].patchIdx];
const RectsBinPack::Rect& rect = placedRects[idxTexture][idxPlacedPatch].rect;
// copy patch image
ASSERT((rect.width == texturePatch.rect.width && rect.height == texturePatch.rect.height) || (rect.height == texturePatch.rect.width && rect.width == texturePatch.rect.height));
int x(0), y(1);
if (texturePatch.label != NO_ID) {
// patch has a valid source view: copy its image region into the atlas
const Image& imageData = images[texturePatch.label];
cv::Mat patch(imageData.image(texturePatch.rect));
if (rect.width != texturePatch.rect.width) {
// flip patch and texture-coordinates
patch = patch.t();
x = 1; y = 0;
}
patch.copyTo(texturesDiffuseTemp[idxTexture](rect));
} else {
// patch has no source view: remember its faces as "empty color" triangles
auto it = texturePatch.faces.begin();
while (it != texturePatch.faces.end()) {
emptyFaceIndexes.push_back(*it);
++it;
}
}
// compute final texture coordinates
// (shift the per-patch coordinates by the packed rectangle's top-left corner,
// swapping axes via x/y when the rectangle was placed transposed)
const TexCoord offset(rect.tl());
for (const FIndex idxFace: texturePatch.faces) {
TexCoord* texcoords = faceTexcoords2.data()+idxFace*3;
faceTexindices2[idxFace] = idxTexture;
for (int v=0; v<3; ++v) {
TexCoord& texcoord = texcoords[v];
texcoord = TexCoord( texcoord[x]+offset.x, texcoord[y]+offset.y );
}
}
// NOTE(review): brace balance below looks suspicious — a '}' may have been
// lost when this text was extracted; left byte-identical, verify against the
// repository copy
}
}
}
if (texturesDiffuseTemp.size() == 1)
faceTexindices2.Release();
// apply some sharpening
// (unsharp mask: blend the texture against its Gaussian-blurred copy)
if (fSharpnessWeight > 0) {
constexpr double sigma = 1.5;
for (auto &textureDiffuse: texturesDiffuseTemp) {
Image8U3 blurryTextureDiffuse;
cv::GaussianBlur(textureDiffuse, blurryTextureDiffuse, cv::Size(), sigma);
cv::addWeighted(textureDiffuse, 1+fSharpnessWeight, blurryTextureDiffuse, -fSharpnessWeight, 0, textureDiffuse);
}
}
LOG_OUT() << "Fourth loop completed" << std::endl;
// dump the faces that received no color to a side-car text file
std::ofstream out(basename + "_empty_color_triangles.txt");
RFOREACHPTR(pIdxF, emptyFaceIndexes) {
out << *pIdxF << "\n";
}
out.close();
}
}
// Global seam leveling tailored to externally supplied UV data.
// Placeholder: intended to implement a gentler seam-leveling algorithm.
void MeshTexture::GlobalSeamLevelingExternalUV() {
}
// Local seam leveling tailored to externally supplied UV data.
// Placeholder: intended to implement seam processing that preserves the
// original UV layout.
void MeshTexture::LocalSeamLevelingExternalUV() {
}
// New
void MeshTexture::GlobalSeamLeveling() {
ASSERT(!seamVertices.empty()); const unsigned numPatches(texturePatches.size()-1); // find the patch ID for each vertex PatchIndices patchIndices(vertices.size()); patchIndices.Memset(0); FOREACH(f, faces) { const uint32_t idxPatch(mapIdxPatch[components[f]]); const Face& face = faces[f]; for (int v=0; v<3; ++v) patchIndices[face[v]].idxPatch = idxPatch; } FOREACH(i, seamVertices) { const SeamVertex& seamVertex = seamVertices[i]; ASSERT(!seamVertex.patches.empty()); PatchIndex& patchIndex = patchIndices[seamVertex.idxVertex]; patchIndex.bIndex = true; patchIndex.idxSeamVertex = i; } // assign a row index within the solution vector x to each vertex/patch ASSERT(vertices.size() < static_cast(std::numeric_limits::max())); MatIdx rowsX(0); typedef std::unordered_map VertexPatch2RowMap; cList vertpatch2rows(vertices.size()); FOREACH(i, vertices) { const PatchIndex& patchIndex = patchIndices[i]; VertexPatch2RowMap& vertpatch2row = vertpatch2rows[i]; if (patchIndex.bIndex) { // vertex is part of multiple patches const SeamVertex& seamVertex = seamVertices[patchIndex.idxSeamVertex]; ASSERT(seamVertex.idxVertex == i); for (const SeamVertex::Patch& patch: seamVertex.patches) { ASSERT(patch.idxPatch != numPatches); vertpatch2row[patch.idxPatch] = rowsX++; } } else if (patchIndex.idxPatch < numPatches) { // vertex is part of only one patch vertpatch2row[patchIndex.idxPatch] = rowsX++; } } // fill Tikhonov's Gamma matrix (regularization constraints) // Bruce // const float lambda(0.1f); const float lambda(0.8f); MatIdx rowsGamma(0); Mesh::VertexIdxArr adjVerts; CLISTDEF0(MatEntry) rows(0, vertices.size()*4); FOREACH(v, vertices) { adjVerts.Empty(); scene.mesh.GetAdjVertices(v, adjVerts); VertexPatchIterator itV(patchIndices[v], seamVertices); while (itV.Next()) { const uint32_t idxPatch(itV); if (idxPatch == numPatches) continue; const MatIdx col(vertpatch2rows[v].at(idxPatch)); for (const VIndex vAdj: adjVerts) { if (v >= vAdj) continue; VertexPatchIterator 
itVAdj(patchIndices[vAdj], seamVertices); while (itVAdj.Next()) { const uint32_t idxPatchAdj(itVAdj); if (idxPatch == idxPatchAdj) { const MatIdx colAdj(vertpatch2rows[vAdj].at(idxPatchAdj)); rows.emplace_back(rowsGamma, col, lambda); rows.emplace_back(rowsGamma, colAdj, -lambda); ++rowsGamma; } } } } } ASSERT(rows.size()/2 < static_cast(std::numeric_limits::max())); SparseMat Gamma(rowsGamma, rowsX); Gamma.setFromTriplets(rows.Begin(), rows.End()); rows.Empty(); // fill the matrix A and the coefficients for the Vector b of the linear equation system IndexArr indices; Colors vertexColors; Colors coeffB; for (const SeamVertex& seamVertex: seamVertices) { if (seamVertex.patches.size() < 2) continue; seamVertex.SortByPatchIndex(indices); vertexColors.resize(indices.size()); FOREACH(i, indices) { const SeamVertex::Patch& patch0 = seamVertex.patches[indices[i]]; ASSERT(patch0.idxPatch < numPatches); SampleImage sampler(images[texturePatches[patch0.idxPatch].label].image); for (const SeamVertex::Patch::Edge& edge: patch0.edges) { const SeamVertex& seamVertex1 = seamVertices[edge.idxSeamVertex]; const SeamVertex::Patches::IDX idxPatch1(seamVertex1.patches.Find(patch0.idxPatch)); ASSERT(idxPatch1 != SeamVertex::Patches::NO_INDEX); const SeamVertex::Patch& patch1 = seamVertex1.patches[idxPatch1]; sampler.AddEdge(patch0.proj, patch1.proj); } vertexColors[i] = sampler.GetColor(); } const VertexPatch2RowMap& vertpatch2row = vertpatch2rows[seamVertex.idxVertex]; for (IDX i=0; i(std::numeric_limits::max())); const MatIdx rowsA((MatIdx)coeffB.size()); SparseMat A(rowsA, rowsX); A.setFromTriplets(rows.Begin(), rows.End()); rows.Release(); SparseMat Lhs(A.transpose() * A + Gamma.transpose() * Gamma); // CG uses only the lower triangle, so prune the rest and compress matrix Lhs.prune([](const int& row, const int& col, const float&) -> bool { return col <= row; }); // globally solve for the correction colors Eigen::Matrix colorAdjustments(rowsX, 3); { // init CG solver 
Eigen::ConjugateGradient solver; solver.setMaxIterations(1000); solver.setTolerance(0.0001f); solver.compute(Lhs); ASSERT(solver.info() == Eigen::Success); #ifdef TEXOPT_USE_OPENMP #pragma omp parallel for #endif for (int channel=0; channel<3; ++channel) { // init right hand side vector const Eigen::Map< Eigen::VectorXf, Eigen::Unaligned, Eigen::Stride<0,3> > b(coeffB.front().ptr()+channel, rowsA); // Bruce const Eigen::VectorXf Rhs(SparseMat(A.transpose()) * b); // Eigen::VectorXf Rhs = SparseMat(A.transpose()) * b_map[channel]; // colorAdjustments.col(channel) = solver.solve(Rhs).array() - solver.solve(Rhs).mean(); // solve for x const Eigen::VectorXf x(solver.solve(Rhs)); ASSERT(solver.info() == Eigen::Success); // subtract mean since the system is under-constrained and // we need the solution with minimal adjustments Eigen::Map< Eigen::VectorXf, Eigen::Unaligned, Eigen::Stride<0,3> >(colorAdjustments.data()+channel, rowsX) = x.array() - x.mean(); DEBUG_LEVEL(3, "\tcolor channel %d: %d iterations, %g residual", channel, solver.iterations(), solver.error()); } } // adjust texture patches using the correction colors #ifdef TEXOPT_USE_OPENMP #pragma omp parallel for schedule(dynamic) for (int i=0; i<(int)numPatches; ++i) { #else for (unsigned i=0; i 0 && bary.y > 0 && bary.z > 0) ? 
1.0f : 0.8f; image(pt) = (colors[0]*bary.x + colors[1]*bary.y + colors[2]*bary.z) * weight; } } data(imageAdj); for (const FIndex idxFace: texturePatch.faces) { const Face& face = faces[idxFace]; data.tri = faceTexcoords.Begin()+idxFace*3; for (int v=0; v<3; ++v){ if (auto search = vertpatch2rows[face[v]].find(idxPatch); search != vertpatch2rows[face[v]].end()) data.colors[v] = colorAdjustments.row(vertpatch2rows[face[v]].at(idxPatch)); } ColorMap::RasterizeTriangleBary(data.tri[0], data.tri[1], data.tri[2], data); } // dilate with one pixel width, in order to make sure patch border smooths out a little imageAdj.DilateMean<1>(imageAdj, Color::ZERO); // Bruce cv::Mat adjMat(imageAdj); // cv::GaussianBlur(adjMat, adjMat, cv::Size(3,3), 0.5); // 将原有3x3高斯核升级为5x5,并增加迭代次数 cv::GaussianBlur(adjMat, adjMat, cv::Size(5,5), 1.2); // 新增:边缘保持滤波(保留锐利边缘的同时平滑颜色过渡) cv::Mat filteredAdj; cv::edgePreservingFilter(adjMat, filteredAdj, cv::RECURS_FILTER, 60, 0.4); adjMat = filteredAdj; // 修改:在应用调整时进行边缘检测,避免过度调整 // cv::Mat edgeMask; cv::Canny(images[texturePatch.label].image(texturePatch.rect), edgeMask, 50, 150); // apply color correction to the patch image cv::Mat image(images[texturePatch.label].image(texturePatch.rect)); //* for (int r=0; r(r,c) > 0 ? 
0.3f : 1.0f; const Color& a = imageAdj(r,c); if (a == Color::ZERO) continue; Pixel8U& v = image.at(r,c); if (v.r == 0 && v.g == 0 && v.b == 0) continue; const Color col(RGB2YCBCR(Color(v))); // const Color acol(YCBCR2RGB(Color(col+a))); Color acol = YCBCR2RGB(Color(col + a * edgeWeight)); // 应用边缘权重 for (int p=0; p<3; ++p) { float val = acol[p]; val = std::min(std::max(val, 0.0f), 255.0f); v[p] = static_cast(val + 0.5f); // 四舍五入 } } } /* for (int r=0; r(r,c); const Color col(RGB2YCBCR(Color(v))); const Color acol(YCBCR2RGB(Color(col+a))); // 添加范围限制 (0-255) for (int p=0; p<3; ++p) { float val = acol[p]; val = std::min(std::max(val, 0.0f), 255.0f); // 确保在0-255范围内 v[p] = static_cast(val); } } } //*/ } } // New void MeshTexture::LocalSeamLeveling() { ASSERT(!seamVertices.empty()); const unsigned numPatches(texturePatches.size()-1); // Create a boolean array to mark invalid vertices BoolArr vertexInvalid(vertices.size()); vertexInvalid.Memset(false); FOREACH(f, faces) { if (labelsInvalid[f] != NO_ID) { const Face& face = faces[f]; for (int v=0; v<3; ++v) vertexInvalid[face[v]] = true; } } // adjust texture patches locally, so that the border continues smoothly inside the patch #ifdef TEXOPT_USE_OPENMP #pragma omp parallel for schedule(dynamic) for (int i=0; i<(int)numPatches; ++i) { #else for (unsigned i=0; i(sampler, samplePos0)); const TexCoord samplePos1(p1 + p1Dir * l); const Color color1(image1.sample(sampler, samplePos1)/255.f); image(pt) = Color((color0 + color1) * 0.5f); // set mask edge also mask(pt) = border; } } data(image, mask, imageOrg, image1, p0, p0Adj, p1, p1Adj); Image32F3::DrawLine(p0, p0Adj, data); // skip remaining patches, // as a manifold edge is shared by maximum two face (one in each patch), which we found already break; } } // render the vertex at the patch border meeting neighbor patches AccumColor accumColor; // for each patch... 
for (const SeamVertex::Patch& patch: seamVertex0.patches) { // add its view to the vertex mean color const Image8U3& img(images[texturePatches[patch.idxPatch].label].image); accumColor.Add(img.sample(sampler, patch.proj)/255.f, 1.f); } const ImageRef pt(ROUND2INT(patch0.proj-offset)); image(pt) = accumColor.Normalized(); mask(pt) = border; } // make sure the border is continuous and // keep only the exterior tripe of the given size ProcessMask(mask, 20); // compute texture patch blending PoissonBlending(imageOrg, image, mask, bias); // apply color correction to the patch image cv::Mat imagePatch(image0(texturePatch.rect)); for (int r=0; r(r,c); for (int p=0; p<3; ++p) v[p] = (uint8_t)CLAMP(ROUND2INT(a[p]*255.f), 0, 255); } } } } void MeshTexture::GenerateTexture2(bool bGlobalSeamLeveling, bool bLocalSeamLeveling, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, float fSharpnessWeight, int maxTextureSize, const SEACAVE::String& basename) { // Bruce bGlobalSeamLeveling = false; bLocalSeamLeveling = false; // project patches in the corresponding view and compute texture-coordinates and bounding-box const int border(2); faceTexcoords.resize(faces.size()*3); faceTexindices.resize(faces.size()); #ifdef TEXOPT_USE_OPENMP // LOG_OUT() << "def TEXOPT_USE_OPENMP" << std::endl; const unsigned numPatches(texturePatches.size()-1); #pragma omp parallel for schedule(dynamic) for (int_t idx=0; idx<(int_t)numPatches; ++idx) { TexturePatch& texturePatch = texturePatches[(uint32_t)idx]; #else for (TexturePatch *pTexturePatch=texturePatches.Begin(), *pTexturePatchEnd=texturePatches.End()-1; pTexturePatch 2 && (bGlobalSeamLeveling || bLocalSeamLeveling)) { // create seam vertices and edges CreateSeamVertices(); // perform global seam leveling if (bGlobalSeamLeveling) { TD_TIMER_STARTD(); GlobalSeamLeveling(); DEBUG_ULTIMATE("\tglobal seam leveling completed (%s)", TD_TIMER_GET_FMT().c_str()); } // perform local seam leveling if (bLocalSeamLeveling) { 
TD_TIMER_STARTD(); LocalSeamLeveling(); DEBUG_ULTIMATE("\tlocal seam leveling completed (%s)", TD_TIMER_GET_FMT().c_str()); } } // merge texture patches with overlapping rectangles for (unsigned i=0; i 0 && (texturePatches[i].rect.width > maxTextureSize || texturePatches[i].rect.height > maxTextureSize)) { DEBUG("error: a patch of size %u x %u does not fit the texture", texturePatches[i].rect.width, texturePatches[i].rect.height); ABORT("the maximum texture size chosen cannot fit a patch"); } unplacedRects[i] = {texturePatches[i].rect, i}; } LOG_OUT() << "unplacedRects loop completed" << std::endl; LOG_OUT() << "pack patches: one pack per texture file loop completed" << std::endl; // pack patches: one pack per texture file CLISTDEF2IDX(RectsBinPack::RectWIdxArr, TexIndex) placedRects; { // increase texture size till all patches fit const unsigned typeRectsBinPack(nRectPackingHeuristic/100); const unsigned typeSplit((nRectPackingHeuristic-typeRectsBinPack*100)/10); const unsigned typeHeuristic(nRectPackingHeuristic%10); int textureSize = 0; while (!unplacedRects.empty()) { TD_TIMER_STARTD(); if (textureSize == 0) { textureSize = RectsBinPack::ComputeTextureSize(unplacedRects, nTextureSizeMultiple); if (maxTextureSize > 0 && textureSize > maxTextureSize) textureSize = maxTextureSize; } RectsBinPack::RectWIdxArr newPlacedRects; switch (typeRectsBinPack) { case 0: { MaxRectsBinPack pack(textureSize, textureSize); newPlacedRects = pack.Insert(unplacedRects, (MaxRectsBinPack::FreeRectChoiceHeuristic)typeHeuristic); break; } case 1: { SkylineBinPack pack(textureSize, textureSize, typeSplit!=0); newPlacedRects = pack.Insert(unplacedRects, (SkylineBinPack::LevelChoiceHeuristic)typeHeuristic); break; } case 2: { GuillotineBinPack pack(textureSize, textureSize); newPlacedRects = pack.Insert(unplacedRects, false, (GuillotineBinPack::FreeRectChoiceHeuristic)typeHeuristic, (GuillotineBinPack::GuillotineSplitHeuristic)typeSplit); break; } default: ABORT("error: unknown 
RectsBinPack type"); } DEBUG_ULTIMATE("\tpacking texture completed: %u initial patches, %u placed patches, %u texture-size, %u textures (%s)", texturePatches.size(), newPlacedRects.size(), textureSize, placedRects.size(), TD_TIMER_GET_FMT().c_str()); if (textureSize == maxTextureSize || unplacedRects.empty()) { // create texture image placedRects.emplace_back(std::move(newPlacedRects)); texturesDiffuse.emplace_back(textureSize, textureSize).setTo(cv::Scalar(colEmpty.b, colEmpty.g, colEmpty.r)); textureSize = 0; } else { // try again with a bigger texture textureSize *= 2; if (maxTextureSize > 0) textureSize = std::max(textureSize, maxTextureSize); unplacedRects.JoinRemove(newPlacedRects); } } } LOG_OUT() << "Third loop completed" << std::endl; Mesh::FaceIdxArr emptyFaceIndexes; #ifdef TEXOPT_USE_OPENMP #pragma omp parallel for schedule(dynamic) for (int_t i=0; i<(int_t)placedRects.size(); ++i) { for (int_t j=0; j<(int_t)placedRects[(TexIndex)i].size(); ++j) { const TexIndex idxTexture((TexIndex)i); const uint32_t idxPlacedPatch((uint32_t)j); #else FOREACH(idxTexture, placedRects) { FOREACH(idxPlacedPatch, placedRects[idxTexture]) { #endif const TexturePatch& texturePatch = texturePatches[placedRects[idxTexture][idxPlacedPatch].patchIdx]; const RectsBinPack::Rect& rect = placedRects[idxTexture][idxPlacedPatch].rect; // copy patch image ASSERT((rect.width == texturePatch.rect.width && rect.height == texturePatch.rect.height) || (rect.height == texturePatch.rect.width && rect.width == texturePatch.rect.height)); int x(0), y(1); if (texturePatch.label != NO_ID) { const Image& imageData = images[texturePatch.label]; cv::Mat patch(imageData.image(texturePatch.rect)); if (rect.width != texturePatch.rect.width) { // flip patch and texture-coordinates patch = patch.t(); x = 1; y = 0; } patch.copyTo(texturesDiffuse[idxTexture](rect)); } else { auto it = texturePatch.faces.begin(); while (it != texturePatch.faces.end()) { emptyFaceIndexes.push_back(*it); ++it; } } // compute 
final texture coordinates const TexCoord offset(rect.tl()); for (const FIndex idxFace: texturePatch.faces) { TexCoord* texcoords = faceTexcoords.data()+idxFace*3; faceTexindices[idxFace] = idxTexture; for (int v=0; v<3; ++v) { TexCoord& texcoord = texcoords[v]; texcoord = TexCoord( texcoord[x]+offset.x, texcoord[y]+offset.y ); } } } } if (texturesDiffuse.size() == 1) faceTexindices.Release(); // apply some sharpening if (fSharpnessWeight > 0) { constexpr double sigma = 1.5; for (auto &textureDiffuse: texturesDiffuse) { Image8U3 blurryTextureDiffuse; cv::GaussianBlur(textureDiffuse, blurryTextureDiffuse, cv::Size(), sigma); cv::addWeighted(textureDiffuse, 1+fSharpnessWeight, blurryTextureDiffuse, -fSharpnessWeight, 0, textureDiffuse); } } LOG_OUT() << "Fourth loop completed" << std::endl; std::ofstream out(basename + "_empty_color_triangles.txt"); RFOREACHPTR(pIdxF, emptyFaceIndexes) { out << *pIdxF << "\n"; } out.close(); } } #include // 保存生成的纹理图集 bool SaveGeneratedTextures(const Mesh::Image8U3Arr& generatedTextures, const std::string& outputDir) { if (generatedTextures.empty()) { DEBUG_EXTRA("错误: 没有纹理可保存"); return false; } // 确保输出目录存在 #ifdef _WIN32 _mkdir(outputDir.c_str()); #else mkdir(outputDir.c_str(), 0755); #endif // 保存所有纹理 for (size_t i = 0; i < generatedTextures.size(); ++i) { if (generatedTextures[i].empty()) { DEBUG_EXTRA("警告: 纹理 %zu 为空,跳过保存", i); continue; } // 生成文件名 std::string filename = outputDir + "/texture_" + std::to_string(i) + ".png"; // 使用OpenCV保存图像 if (cv::imwrite(filename, generatedTextures[i])) { DEBUG_EXTRA("成功保存纹理: %s (尺寸: %dx%d)", filename.c_str(), generatedTextures[i].cols, generatedTextures[i].rows); } else { DEBUG_EXTRA("错误: 无法保存纹理到 %s", filename.c_str()); return false; } } return true; } void MeshTexture::CheckColorChannels(const Image8U3& texture, const std::string& name) { if (texture.empty()) { DEBUG_EXTRA("%s: 纹理为空", name.c_str()); return; } // 检查左上角几个像素的颜色 for (int y = 0; y < std::min(3, texture.rows); ++y) { for (int x = 0; x < 
std::min(3, texture.cols); ++x) { const cv::Vec3b& pixel = texture.at(y, x); DEBUG_EXTRA("%s[%d,%d]: B=%d, G=%d, R=%d", name.c_str(), x, y, pixel[0], pixel[1], pixel[2]); } } // 计算平均颜色 cv::Scalar mean = cv::mean(texture); DEBUG_EXTRA("%s 平均颜色: B=%.1f, G=%.1f, R=%.1f", name.c_str(), mean[0], mean[1], mean[2]); } bool MeshTexture::PackTextureAtlases( Mesh::TexCoordArr& faceTexcoords2, Mesh::TexIndexArr& faceTexindices2, std::vector& generatedTextures, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, int maxTextureSize) { DEBUG_EXTRA("PackTextureAtlases: heuristic=%u, maxSize=%d", nRectPackingHeuristic, maxTextureSize); if (texturePatches.empty()) { DEBUG_EXTRA("No texture patches to pack"); return false; } // 1. 准备纹理块列表 std::vector patches; for (auto& patch : texturePatches) { if (patch.rect.width > 0 && patch.rect.height > 0) { patches.push_back(&patch); } } if (patches.empty()) { DEBUG_EXTRA("No valid texture patches to pack"); return false; } DEBUG_EXTRA("Packing %zu texture patches", patches.size()); // 2. 计算图集大小 int atlasSize = 1024; // 默认大小 if (maxTextureSize > 0) { atlasSize = std::min(atlasSize, maxTextureSize); } // 确保大小是 nTextureSizeMultiple 的倍数 if (nTextureSizeMultiple > 1) { atlasSize = ((atlasSize + nTextureSizeMultiple - 1) / nTextureSizeMultiple) * nTextureSizeMultiple; } DEBUG_EXTRA("Using atlas size: %dx%d", atlasSize, atlasSize); // 3. 
// sort the texture patches according to the chosen packing heuristic
// (placing bigger/wider/taller patches first tends to pack tighter)
switch (nRectPackingHeuristic) {
case 0: // by area, descending
	std::sort(patches.begin(), patches.end(), [](const TexturePatch* a, const TexturePatch* b) {
		return a->rect.area() > b->rect.area();
	});
	break;
case 1: // by width, descending
	std::sort(patches.begin(), patches.end(), [](const TexturePatch* a, const TexturePatch* b) {
		return a->rect.width > b->rect.width;
	});
	break;
case 2: // by height, descending
	std::sort(patches.begin(), patches.end(), [](const TexturePatch* a, const TexturePatch* b) {
		return a->rect.height > b->rect.height;
	});
	break;
case 3: // by longest side, descending
	std::sort(patches.begin(), patches.end(), [](const TexturePatch* a, const TexturePatch* b) {
		return std::max(a->rect.width, a->rect.height) > std::max(b->rect.width, b->rect.height);
	});
	break;
default: // keep original order
	break;
}
// 4. pack using a simple Skyline algorithm:
// skyline[x] holds the currently occupied height of atlas column x
std::vector<int> skyline(atlasSize, 0); // skyline height per column
std::vector<std::pair<int, int>> placements; // chosen top-left position of each placed patch
// create the first atlas image, filled with the empty color
Image8U3 firstAtlas(atlasSize, atlasSize);
firstAtlas.fill(colEmpty);
generatedTextures.push_back(firstAtlas);
int currentAtlasIndex = 0; // index of the atlas currently being filled
for (const auto& patch : patches) {
	int bestX = -1;
	int bestY = INT_MAX;
	int bestWidth = patch->rect.width;
	int bestHeight = patch->rect.height;
	// scan every feasible x position and keep the one whose resting height is lowest
	for (int x = 0; x <= atlasSize - bestWidth; ++x) {
		int maxY = 0;
		for (int w = 0; w < bestWidth; ++w) {
			maxY = std::max(maxY, skyline[x + w]);
		}
		if (maxY + bestHeight <= atlasSize && maxY < bestY) {
			bestY = maxY;
			bestX = x;
		}
	}
	if (bestX != -1) {
		// found a position: place the patch there
		placements.emplace_back(bestX, bestY);
		// raise the skyline over the covered columns
		for (int w = 0; w < bestWidth; ++w) {
			skyline[bestX + w] = bestY + bestHeight;
		}
		// remap the texture coordinates of the patch faces into the atlas
		for (FIndex fid : patch->faces) {
			TexCoord* texcoords = faceTexcoords2.data() + fid * 3;
			for (int j = 0; j < 3; ++j) {
				// normalize the coordinate within the original patch rectangle
				float u = (texcoords[j].x - patch->rect.x) / patch->rect.width;
				float v = (texcoords[j].y - patch->rect.y) / patch->rect.height;
				// compute the new atlas coordinates
				// NOTE(review): these are normalized [0,1] coordinates (divided by atlasSize),
				// unlike the pixel coordinates produced elsewhere - confirm callers expect this
				texcoords[j].x = (bestX + u * bestWidth) / atlasSize;
				texcoords[j].y = (bestY + v * bestHeight) / atlasSize;
			}
			// update the face's texture index
faceTexindices2[fid] = currentAtlasIndex; } } else { // 当前图集已满,创建新图集 DEBUG_EXTRA("Current atlas is full, creating new one"); // 创建新图集 Image8U3 newAtlas(atlasSize, atlasSize); newAtlas.fill(colEmpty); generatedTextures.push_back(newAtlas); currentAtlasIndex++; // 重置skyline std::fill(skyline.begin(), skyline.end(), 0); // 重新放置当前纹理块 int x = 0; int y = 0; placements.emplace_back(x, y); // 更新skyline for (int w = 0; w < bestWidth; ++w) { skyline[x + w] = bestHeight; } // 更新纹理坐标 for (FIndex fid : patch->faces) { TexCoord* texcoords = faceTexcoords2.data() + fid * 3; for (int j = 0; j < 3; ++j) { // 从原始坐标转换到图集坐标 float u = (texcoords[j].x - patch->rect.x) / patch->rect.width; float v = (texcoords[j].y - patch->rect.y) / patch->rect.height; // 计算新的图集坐标 texcoords[j].x = (x + u * bestWidth) / atlasSize; texcoords[j].y = (y + v * bestHeight) / atlasSize; } // 更新面的纹理索引 faceTexindices2[fid] = currentAtlasIndex; } } } DEBUG_EXTRA("Successfully packed %zu patches into %zu texture atlases", patches.size(), generatedTextures.size()); return true; } cv::Rect MeshTexture::ComputeOptimalPatchBounds(const AABB2f& aabb, const cv::Size& imageSize, int border) { DEBUG_EXTRA("Computing optimal patch bounds for AABB, image: %dx%d, border: %d", imageSize.width, imageSize.height, border); // 检查输入参数的有效性 if (imageSize.width <= 0 || imageSize.height <= 0) { DEBUG_EXTRA("Error: Invalid image size: %dx%d", imageSize.width, imageSize.height); return cv::Rect(0, 0, 0, 0); } if (border < 0) { border = 0; } // 步骤1: 获取AABB的边界值 float x1, y1, x2, y2; // 确保aabb是有效的 if (aabb.ptMin.x() >= aabb.ptMax.x() || aabb.ptMin.y() >= aabb.ptMax.y()) { DEBUG_EXTRA("Error: Invalid AABB"); return cv::Rect(0, 0, 0, 0); } x1 = aabb.ptMin.x(); y1 = aabb.ptMin.y(); x2 = aabb.ptMax.x(); y2 = aabb.ptMax.y(); DEBUG_EXTRA("AABB bounds: [%f, %f] - [%f, %f]", x1, y1, x2, y2); // 步骤2: 计算基本边界(包含边距) float minX = x1 - border; float minY = y1 - border; float maxX = x2 + border; float maxY = y2 + border; // 步骤3: 确保边界不超出图像范围 minX = 
std::max(0.0f, minX); // clamp the border-expanded bounds to the image
minY = std::max(0.0f, minY);
maxX = std::min(static_cast<float>(imageSize.width) - 1.0f, maxX);
maxY = std::min(static_cast<float>(imageSize.height) - 1.0f, maxY);
// check whether the clamped bounds still form a non-empty rectangle
if (minX >= maxX || minY >= maxY) {
	DEBUG_EXTRA("Warning: Invalid bounds after clamping: [%f, %f] - [%f, %f]", minX, minY, maxX, maxY);
	// fall back to a minimal 1x1 rectangle anchored at the clamped AABB origin
	int x = static_cast<int>(std::floor(x1));
	int y = static_cast<int>(std::floor(y1));
	x = std::max(0, std::min(x, imageSize.width - 1));
	y = std::max(0, std::min(y, imageSize.height - 1));
	return cv::Rect(x, y, 1, 1);
}
// step 4: snap to integer pixel coordinates (floor the min corner, ceil the max corner)
int x = static_cast<int>(std::floor(minX));
int y = static_cast<int>(std::floor(minY));
int width = static_cast<int>(std::ceil(maxX)) - x;
int height = static_cast<int>(std::ceil(maxY)) - y;
// enforce a minimum 1x1 size
width = std::max(1, width);
height = std::max(1, height);
// keep the rectangle inside the image
if (x < 0) x = 0;
if (y < 0) y = 0;
if (x >= imageSize.width) x = imageSize.width - 1;
if (y >= imageSize.height) y = imageSize.height - 1;
if (x + width > imageSize.width) { width = imageSize.width - x; }
if (y + height > imageSize.height) { height = imageSize.height - y; }
// final validation after all clamping
width = std::max(1, width);
height = std::max(1, height);
cv::Rect result(x, y, width, height);
DEBUG_EXTRA("Optimal patch bounds computed: [%d, %d, %d, %d]", result.x, result.y, result.width, result.height);
return result;
}

// Drop seam edges whose face indices, component ids or component->patch
// mappings are out of range, keeping only the consistent ones.
void MeshTexture::CleanSeamEdges() {
	DEBUG_EXTRA("Cleaning seam edges: removing invalid edges");
	PairIdxArr validSeamEdges;
	validSeamEdges.Reserve(seamEdges.GetSize());
	for (uint32_t i = 0; i < seamEdges.GetSize(); ++i) {
		const PairIdx& edge = seamEdges[i];
		// both endpoints must reference existing faces
		if (edge.i >= faces.GetSize() || edge.j >= faces.GetSize()) {
			DEBUG_EXTRA("Removing invalid seam edge %u: (%u, %u) - faces size: %u", i, edge.i, edge.j, faces.GetSize());
			continue;
		}
		// both faces must have an entry in the components array
		if (edge.i >= components.size() || edge.j >= components.size()) {
			DEBUG_EXTRA("Removing invalid seam edge %u: components size mismatch", i);
			continue;
		}
		// both faces must belong to a valid component
		if (components[edge.i] == NO_ID ||
components[edge.j] == NO_ID) { DEBUG_EXTRA("Removing seam edge %u: faces belong to invalid component", i); continue; } // 检查mapIdxPatch映射 if (components[edge.i] >= mapIdxPatch.GetSize() || components[edge.j] >= mapIdxPatch.GetSize()) { DEBUG_EXTRA("Removing seam edge %u: component ID out of mapIdxPatch range", i); continue; } validSeamEdges.push_back(edge); } seamEdges = validSeamEdges; DEBUG_EXTRA("After cleaning: %u valid seam edges remain", seamEdges.GetSize()); } void MeshTexture::FixComponentMappingsOnceAndForAll() { DEBUG_EXTRA("=== Fixing component mappings once and for all ==="); // 步骤1: 统计当前状态 int totalComponents = mapIdxPatch.GetSize(); int invalidComponents = 0; std::vector invalidCompIDs; for (uint32_t compID = 0; compID < mapIdxPatch.GetSize(); ++compID) { uint32_t patchIdx = mapIdxPatch[compID]; if (patchIdx == NO_ID || patchIdx >= texturePatches.size()) { invalidComponents++; invalidCompIDs.push_back(compID); } } DEBUG_EXTRA("Total components: %d", totalComponents); DEBUG_EXTRA("Invalid components: %d (%.1f%%)", invalidComponents, (float)invalidComponents * 100.0f / totalComponents); if (invalidComponents == 0) { DEBUG_EXTRA("No invalid components found. 
Nothing to fix."); return; } // 步骤2: 构建面到纹理块的直接映射 std::vector faceToPatch(faces.GetSize(), NO_ID); for (size_t patchIdx = 0; patchIdx < texturePatches.size(); ++patchIdx) { const TexturePatch& patch = texturePatches[patchIdx]; for (uint32_t i = 0; i < patch.faces.GetSize(); ++i) { FIndex faceIdx = patch.faces[i]; if (faceIdx < faceToPatch.size()) { faceToPatch[faceIdx] = static_cast(patchIdx); } } } // 步骤3: 统计每个纹理块的面数 std::vector patchFaceCount(texturePatches.size(), 0); for (size_t patchIdx = 0; patchIdx < texturePatches.size(); ++patchIdx) { patchFaceCount[patchIdx] = texturePatches[patchIdx].faces.GetSize(); } DEBUG_EXTRA("Texture patch face counts:"); for (size_t patchIdx = 0; patchIdx < texturePatches.size(); ++patchIdx) { DEBUG_EXTRA(" Patch %zu: %d faces", patchIdx, patchFaceCount[patchIdx]); } // 步骤4: 为每个组件找到最合适的纹理块 std::vector> compPatchVotes(totalComponents); // 统计每个组件中纹理块的票数 for (FIndex faceIdx = 0; faceIdx < faces.GetSize(); ++faceIdx) { if (faceIdx >= components.GetSize()) { continue; } uint32_t compID = components[faceIdx]; uint32_t patchIdx = faceToPatch[faceIdx]; if (compID != NO_ID && patchIdx != NO_ID) { if (compID < compPatchVotes.size()) { compPatchVotes[compID][patchIdx]++; } } } // 步骤5: 为无效组件重新分配纹理块 int fixedCount = 0; for (uint32_t compID : invalidCompIDs) { if (compID >= compPatchVotes.size()) { continue; } const auto& votes = compPatchVotes[compID]; if (!votes.empty()) { // 使用票数最多的纹理块 uint32_t bestPatch = NO_ID; int maxVotes = 0; for (const auto& vote : votes) { if (vote.second > maxVotes) { maxVotes = vote.second; bestPatch = vote.first; } } if (bestPatch != NO_ID) { mapIdxPatch[compID] = bestPatch; fixedCount++; DEBUG_EXTRA(" Fixed component %u -> patch %u (%d votes)", compID, bestPatch, maxVotes); continue; } } // 如果没有投票,尝试找到最近的组件 if (compID < compPatchVotes.size()) { // 找到这个组件的所有面 std::vector compFaces; for (FIndex faceIdx = 0; faceIdx < faces.GetSize(); ++faceIdx) { if (faceIdx < components.GetSize() && components[faceIdx] == compID) { 
compFaces.push_back(faceIdx); } } if (!compFaces.empty()) { // 通过相邻面找到最常见的纹理块 std::unordered_map neighborPatchVotes; for (FIndex faceIdx : compFaces) { const Face& face = faces[faceIdx]; for (int i = 0; i < 3; ++i) { VIndex vertexIdx = face[i]; if (vertexIdx >= scene.mesh.vertexFaces.size()) { continue; } const Mesh::FaceIdxArr& adjacentFaces = scene.mesh.vertexFaces[vertexIdx]; for (FIndex adjFace : adjacentFaces) { if (adjFace < faceToPatch.size()) { uint32_t adjPatch = faceToPatch[adjFace]; if (adjPatch != NO_ID) { neighborPatchVotes[adjPatch]++; } } } } } if (!neighborPatchVotes.empty()) { uint32_t bestPatch = NO_ID; int maxVotes = 0; for (const auto& vote : neighborPatchVotes) { if (vote.second > maxVotes) { maxVotes = vote.second; bestPatch = vote.first; } } if (bestPatch != NO_ID) { mapIdxPatch[compID] = bestPatch; fixedCount++; DEBUG_EXTRA(" Fixed component %u -> neighbor patch %u (%d votes)", compID, bestPatch, maxVotes); continue; } } } } // 最后的回退方案:使用第一个有效的纹理块 uint32_t fallbackPatch = 0; for (size_t patchIdx = 0; patchIdx < texturePatches.size(); ++patchIdx) { if (patchFaceCount[patchIdx] > 0) { fallbackPatch = static_cast(patchIdx); break; } } if (fallbackPatch < texturePatches.size()) { mapIdxPatch[compID] = fallbackPatch; fixedCount++; DEBUG_EXTRA(" Fixed component %u -> fallback patch %u", compID, fallbackPatch); } else { DEBUG_EXTRA(" WARNING: Cannot fix component %u - no valid patches", compID); } } DEBUG_EXTRA("Fixed %d/%d invalid components", fixedCount, invalidComponents); // 步骤6: 最终验证 int remainingInvalid = 0; for (uint32_t compID = 0; compID < mapIdxPatch.GetSize(); ++compID) { uint32_t patchIdx = mapIdxPatch[compID]; if (patchIdx == NO_ID || patchIdx >= texturePatches.size()) { remainingInvalid++; } } if (remainingInvalid > 0) { DEBUG_EXTRA("WARNING: %d components still have invalid patch mappings", remainingInvalid); // 强制分配:将所有无效组件映射到第一个纹理块 for (uint32_t compID = 0; compID < mapIdxPatch.GetSize(); ++compID) { if (mapIdxPatch[compID] == 
NO_ID || mapIdxPatch[compID] >= texturePatches.size()) { mapIdxPatch[compID] = 0; // 强制映射到第一个纹理块 } } DEBUG_EXTRA("Forced all invalid components to patch 0"); } else { DEBUG_EXTRA("All components now have valid patch mappings!"); } DEBUG_EXTRA("=== Component mapping fix completed ==="); } void MeshTexture::AssignOrphanFacesToComponents(const std::vector& faceToPatch) { int assigned = 0; int created = 0; for (FIndex faceIdx = 0; faceIdx < faces.GetSize(); ++faceIdx) { if (components[faceIdx] != NO_ID) { continue; } // 查找相邻面的组件 std::unordered_map neighborComponentCounts; std::unordered_map componentToPatch; const Face& face = faces[faceIdx]; for (int i = 0; i < 3; ++i) { VIndex vertexIdx = face[i]; if (vertexIdx >= scene.mesh.vertexFaces.size()) { continue; } const Mesh::FaceIdxArr& adjacentFaces = scene.mesh.vertexFaces[vertexIdx]; for (FIndex adjFace : adjacentFaces) { if (adjFace != faceIdx && adjFace < components.GetSize()) { uint32_t compID = components[adjFace]; if (compID != NO_ID && compID < mapIdxPatch.GetSize()) { neighborComponentCounts[compID]++; componentToPatch[compID] = mapIdxPatch[compID]; } } } } // 优先选择有有效纹理块映射的相邻组件 uint32_t bestCompID = NO_ID; int maxCount = 0; for (const auto& pair : neighborComponentCounts) { uint32_t compID = pair.first; int count = pair.second; // 检查组件是否有有效的纹理块映射 auto it = componentToPatch.find(compID); if (it != componentToPatch.end()) { uint32_t patchIdx = it->second; if (patchIdx != NO_ID && patchIdx < texturePatches.size() - 1) { if (count > maxCount) { maxCount = count; bestCompID = compID; } } } } if (bestCompID != NO_ID) { // 分配到现有组件 components[faceIdx] = bestCompID; assigned++; } else { // 创建新组件 uint32_t newCompID = static_cast(components.GetSize()); // 扩展数组 components.Resize(faceIdx + 1); for (uint32_t i = faceIdx; i < components.GetSize(); ++i) { if (components[i] == NO_ID) { components[i] = NO_ID; } } components[faceIdx] = newCompID; // 扩展映射数组 if (newCompID >= mapIdxPatch.GetSize()) { mapIdxPatch.Resize(newCompID + 
1); for (uint32_t i = 0; i < mapIdxPatch.GetSize(); ++i) { if (mapIdxPatch[i] == NO_ID && i < texturePatches.size()) { // 为新组件分配一个默认的有效纹理块 mapIdxPatch[i] = 0; } } } // 为这个新组件找到最近的纹理块 std::vector faceList = {faceIdx}; uint32_t nearestPatch = FindNearestPatchForFaces(faceList); if (nearestPatch != NO_ID && nearestPatch < texturePatches.size() - 1) { mapIdxPatch[newCompID] = nearestPatch; } else { mapIdxPatch[newCompID] = 0; // 默认值 } created++; } } DEBUG_EXTRA(" Assigned %d orphan faces to existing components, created %d new components", assigned, created); } void MeshTexture::RebuildComponentMapping() { DEBUG_EXTRA("Rebuilding component to patch mapping from scratch..."); // 1. 清除现有的映射 components.clear(); mapIdxPatch.clear(); // 2. 初始化数组 components.Resize(faces.GetSize()); for (uint32_t i = 0; i < components.GetSize(); ++i) { components[i] = NO_ID; } // 3. 构建面到纹理块的映射 std::vector faceToPatch(faces.GetSize(), NO_ID); for (size_t patchIdx = 0; patchIdx < texturePatches.size(); ++patchIdx) { const TexturePatch& patch = texturePatches[patchIdx]; // 跳过无效纹理块 if (patch.faces.IsEmpty() || patchIdx == texturePatches.size() - 1) { // 跳过最后一个无效纹理块 continue; } // 将面映射到纹理块 for (uint32_t i = 0; i < patch.faces.GetSize(); ++i) { FIndex faceIdx = patch.faces[i]; if (faceIdx < faceToPatch.size()) { faceToPatch[faceIdx] = static_cast(patchIdx); } } } DEBUG_EXTRA(" Mapped %zu faces to patches", texturePatches.size() - 1); // 4. 
// build connected components: faces are grouped when they are adjacent
// (share a vertex) AND are mapped to the same texture patch
uint32_t nextCompID = 0;
std::vector<bool> visited(faces.GetSize(), false);
for (FIndex faceIdx = 0; faceIdx < faces.GetSize(); ++faceIdx) {
	if (visited[faceIdx] || faceToPatch[faceIdx] == NO_ID) {
		continue;
	}
	// breadth-first search over the region covered by the same texture patch
	uint32_t currentPatch = faceToPatch[faceIdx];
	std::queue<FIndex> queue;
	queue.push(faceIdx);
	visited[faceIdx] = true;
	while (!queue.empty()) {
		FIndex current = queue.front();
		queue.pop();
		// assign the component id
		components[current] = nextCompID;
		// visit the faces sharing a vertex with the current face
		const Face& face = faces[current];
		for (int i = 0; i < 3; ++i) {
			VIndex vertexIdx = face[i];
			if (vertexIdx >= scene.mesh.vertexFaces.size()) {
				continue;
			}
			const Mesh::FaceIdxArr& adjacentFaces = scene.mesh.vertexFaces[vertexIdx];
			for (FIndex adjFace : adjacentFaces) {
				if (adjFace != current && !visited[adjFace] && faceToPatch[adjFace] == currentPatch) {
					queue.push(adjFace);
					visited[adjFace] = true;
				}
			}
		}
	}
	nextCompID++;
}
DEBUG_EXTRA(" Created %u components", nextCompID);
// 5. create the component -> texture-patch mapping, initialized to NO_ID
mapIdxPatch.Resize(nextCompID);
for (uint32_t compID = 0; compID < nextCompID; ++compID) {
	mapIdxPatch[compID] = NO_ID;
}
// histogram: count how many faces of each component land in each texture patch
std::vector<std::unordered_map<uint32_t, int>> componentPatchCounts(nextCompID);
for (FIndex faceIdx = 0; faceIdx < faces.GetSize(); ++faceIdx) {
	uint32_t compID = components[faceIdx];
	uint32_t patchIdx = faceToPatch[faceIdx];
	if (compID != NO_ID && patchIdx != NO_ID) {
		componentPatchCounts[compID][patchIdx]++;
	}
}
// 6.
为每个组件选择最常用的纹理块 for (uint32_t compID = 0; compID < nextCompID; ++compID) { const auto& patchCounts = componentPatchCounts[compID]; if (!patchCounts.empty()) { // 找到最常用的纹理块 uint32_t bestPatch = NO_ID; int maxCount = 0; for (const auto& pair : patchCounts) { if (pair.second > maxCount) { maxCount = pair.second; bestPatch = pair.first; } } mapIdxPatch[compID] = bestPatch; } else { // 没有纹理块的组件,尝试找到最近的纹理块 std::vector componentFaces; for (FIndex faceIdx = 0; faceIdx < faces.GetSize(); ++faceIdx) { if (components[faceIdx] == compID) { componentFaces.push_back(faceIdx); } } if (!componentFaces.empty()) { uint32_t nearestPatch = FindNearestPatchForFaces(componentFaces); if (nearestPatch != NO_ID && nearestPatch < texturePatches.size() - 1) { mapIdxPatch[compID] = nearestPatch; } else { // 如果没有找到,使用默认的第一个纹理块 mapIdxPatch[compID] = 0; } } else { mapIdxPatch[compID] = 0; // 默认值 } } } // 7. 处理孤立面(没有纹理块的面) int orphanCount = 0; for (FIndex faceIdx = 0; faceIdx < faces.GetSize(); ++faceIdx) { if (components[faceIdx] == NO_ID) { orphanCount++; } } if (orphanCount > 0) { DEBUG_EXTRA(" Found %d orphan faces, assigning them to components...", orphanCount); AssignOrphanFacesToComponents(faceToPatch); } // 8. 验证结果 int invalidMappings = 0; for (uint32_t compID = 0; compID < mapIdxPatch.GetSize(); ++compID) { uint32_t patchIdx = mapIdxPatch[compID]; if (patchIdx == NO_ID || patchIdx >= texturePatches.size()) { invalidMappings++; } } if (invalidMappings > 0) { DEBUG_EXTRA(" WARNING: %d components still have invalid patch mappings", invalidMappings); } else { DEBUG_EXTRA(" All components have valid patch mappings"); } } void MeshTexture::CheckMemoryIntegrity() { DEBUG_EXTRA("Checking memory integrity..."); // 检查关键数组的完整性 bool valid = true; // 1. 
检查faces数组 if (faces.GetSize() == 0) { DEBUG_EXTRA(" ERROR: faces array is empty"); valid = false; } else { // 验证每个面的顶点索引 for (uint32_t i = 0; i < faces.GetSize(); ++i) { const Face& face = faces[i]; for (int j = 0; j < 3; ++j) { if (face[j] >= scene.mesh.vertices.size()) { DEBUG_EXTRA(" ERROR: Face %u has invalid vertex index %u at position %d", i, face[j], j); valid = false; } } } } // 2. 检查faceTexcoords数组 if (faceTexcoords.GetSize() != faces.GetSize() * 3) { DEBUG_EXTRA(" ERROR: faceTexcoords size mismatch: %u != %u * 3", faceTexcoords.GetSize(), faces.GetSize()); valid = false; } // 3. 检查components数组 if (components.GetSize() != faces.GetSize()) { DEBUG_EXTRA(" ERROR: components size mismatch: %u != %u", components.GetSize(), faces.GetSize()); valid = false; } // 4. 检查mapIdxPatch数组 for (uint32_t i = 0; i < mapIdxPatch.GetSize(); ++i) { if (mapIdxPatch[i] >= texturePatches.size()) { DEBUG_EXTRA(" WARNING: Component %u maps to invalid patch %u", i, mapIdxPatch[i]); } } if (valid) { DEBUG_EXTRA("Memory integrity check passed"); } else { DEBUG_EXTRA("Memory integrity check FAILED"); } } bool MeshTexture::ValidateSeamDataForLeveling() { // 简化的验证函数 if (faceTexcoords.GetSize() < faces.GetSize() * 3) { DEBUG_EXTRA("ERROR: faceTexcoords size too small"); return false; } if (components.GetSize() != faces.GetSize()) { DEBUG_EXTRA("ERROR: components size mismatch"); return false; } if (seamEdges.GetSize() > 0) { for (uint32_t i = 0; i < seamEdges.GetSize(); ++i) { if (seamEdges[i].i >= faces.GetSize() || seamEdges[i].j >= faces.GetSize()) { DEBUG_EXTRA("ERROR: Invalid seam edge at index %u", i); return false; } } } // 检查 seamVertices for (uint32_t i = 0; i < seamVertices.GetSize(); ++i) { const SeamVertex& sv = seamVertices[i]; // 检查顶点索引 if (sv.idxVertex >= scene.mesh.vertices.size()) { DEBUG_EXTRA("ERROR: Seam vertex %u has invalid vertex index %u", i, sv.idxVertex); return false; } // 检查是否有patch if (sv.patches.empty()) { DEBUG_EXTRA("ERROR: Seam vertex %u has no patches", 
i); return false; } // 遍历patches for (uint32_t patchIndex = 0; patchIndex < sv.patches.GetSize(); ++patchIndex) { const SeamVertex::Patch& patch = sv.patches[patchIndex]; // 使用idxPatch获取纹理块ID uint32_t patchID = patch.idxPatch; if (patchID >= texturePatches.size()) { DEBUG_EXTRA("ERROR: Seam vertex %u has invalid patch %u (texturePatches size: %zu)", i, patchID, texturePatches.size()); return false; } // 还可以检查edges是否有效 for (uint32_t edgeIdx = 0; edgeIdx < patch.edges.GetSize(); ++edgeIdx) { const SeamVertex::Patch::Edge& edge = patch.edges[edgeIdx]; if (edge.idxSeamVertex >= seamVertices.GetSize()) { DEBUG_EXTRA("ERROR: Seam vertex %u, patch %u, edge %u has invalid seam vertex index %u", i, patchID, edgeIdx, edge.idxSeamVertex); return false; } if (edge.idxFace >= faces.GetSize()) { DEBUG_EXTRA("ERROR: Seam vertex %u, patch %u, edge %u has invalid face index %u", i, patchID, edgeIdx, edge.idxFace); return false; } } } } DEBUG_EXTRA("Seam data validation passed"); return true; } // 主要改进的纹理映射函数 bool MeshTexture::GenerateTextureWithViewConsistency( bool bGlobalSeamLeveling, bool bLocalSeamLeveling, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, float fSharpnessWeight, int maxTextureSize, const String& basename, bool bOriginFaceview, Scene* pScene) { DEBUG_EXTRA("Starting texture generation with view consistency optimization"); TD_TIMER_START(); const int border = (bOriginFaceview) ? 2 : 4; const int minPatchSize = 50; // 1. 创建视图一致性映射 std::vector faceViewData(faces.size()); std::vector patchAssignments(faces.size(), -1); // 2. 为每个面选择最佳视图 SelectOptimalViewsWithConsistency(faceViewData, minPatchSize); // 3. 基于视图一致性创建纹理块 CreateConsistentTexturePatches(faceViewData, patchAssignments, minPatchSize); DEBUG_EXTRA("Created %zu texture patches", texturePatches.size()); // 检查纹理块是否有效 if (texturePatches.IsEmpty()) { DEBUG_EXTRA("Error: No texture patches created"); return false; } // 4. 
生成纹理坐标 Mesh::TexCoordArr faceTexcoords2(faces.size() * 3); Mesh::TexIndexArr faceTexindices2(faces.size()); DEBUG_EXTRA("Processing %zu texture patches", texturePatches.size()); // 计算需要处理的纹理块数量 size_t numPatchesToProcess = texturePatches.size(); if (!texturePatches.empty() && texturePatches.back().label == NO_ID) { numPatchesToProcess = texturePatches.size() - 1; } DEBUG_EXTRA("Processing %zu valid texture patches", numPatchesToProcess); // 处理有效纹理块 for (size_t idx = 0; idx < numPatchesToProcess; ++idx) { TexturePatch& texturePatch = texturePatches[idx]; // 检查纹理块是否有效 if (texturePatch.faces.IsEmpty()) { DEBUG_EXTRA("Warning: Texture patch %zu is empty", idx); continue; } // 检查视图ID是否有效 if (texturePatch.label == NO_ID || texturePatch.label >= images.size()) { DEBUG_EXTRA("Warning: Texture patch %zu has invalid label: %d", idx, texturePatch.label); continue; } const Image& imageData = images[texturePatch.label]; // 检查图像是否有效 if (imageData.image.empty()) { DEBUG_EXTRA("Warning: Image for patch %zu is empty", idx); continue; } AABB2f aabb(true); // 计算纹理块的UV边界 bool validAABB = false; for (const FIndex idxFace : texturePatch.faces) { if (idxFace >= faces.size()) { DEBUG_EXTRA("Warning: Invalid face index in patch %zu: %u", idx, idxFace); continue; } const Face& face = faces[idxFace]; TexCoord* texcoords = faceTexcoords2.data() + idxFace * 3; bool faceValid = true; for (int i = 0; i < 3; ++i) { if (face[i] >= vertices.size()) { DEBUG_EXTRA("Warning: Invalid vertex index in face %u", idxFace); faceValid = false; break; } texcoords[i] = imageData.camera.ProjectPointP(vertices[face[i]]); // 检查纹理坐标是否在图像边界内 if (!imageData.image.isInsideWithBorder(texcoords[i], border)) { float border_f = static_cast(border); float imgWidth = static_cast(imageData.image.width()); float imgHeight = static_cast(imageData.image.height()); texcoords[i].x = std::max(border_f, std::min(texcoords[i].x, imgWidth - border_f - 1.0f)); texcoords[i].y = std::max(border_f, std::min(texcoords[i].y, imgHeight - 
border_f - 1.0f)); } aabb.InsertFull(texcoords[i]); } if (faceValid) { validAABB = true; } } if (!validAABB) { DEBUG_EXTRA("Warning: Texture patch %zu has no valid faces", idx); texturePatch.rect = cv::Rect(0, 0, 1, 1); continue; } // 设置纹理块边界 cv::Rect patchRect = ComputeOptimalPatchBounds(aabb, imageData.image.size(), border); // 检查边界是否有效 if (patchRect.width <= 0 || patchRect.height <= 0 || patchRect.x < 0 || patchRect.y < 0 || patchRect.x + patchRect.width > imageData.image.width() || patchRect.y + patchRect.height > imageData.image.height()) { DEBUG_EXTRA("Warning: Invalid rect for patch %zu: [%d, %d, %d, %d]", idx, patchRect.x, patchRect.y, patchRect.width, patchRect.height); // 设置一个安全的边界 int safeX = std::max(0, patchRect.x); int safeY = std::max(0, patchRect.y); int safeWidth = std::max(1, std::min(imageData.image.width() - safeX, patchRect.width)); int safeHeight = std::max(1, std::min(imageData.image.height() - safeY, patchRect.height)); patchRect = cv::Rect(safeX, safeY, safeWidth, safeHeight); } texturePatch.rect = patchRect; DEBUG_EXTRA("Patch %zu bounds: [%d, %d, %d, %d]", idx, patchRect.x, patchRect.y, patchRect.width, patchRect.height); } // 5. 
处理无效视图纹理块 if (!texturePatches.empty() && texturePatches.back().label == NO_ID) { DEBUG_EXTRA("Processing invalid view patch"); TexturePatch& texturePatch = texturePatches.back(); const int sizePatch = border * 2 + 1; texturePatch.rect = cv::Rect(0, 0, sizePatch, sizePatch); for (const FIndex idxFace : texturePatch.faces) { if (idxFace >= faces.size()) { DEBUG_EXTRA("Warning: Invalid face index in invalid patch: %u", idxFace); continue; } TexCoord* texcoords = faceTexcoords2.data() + idxFace * 3; for (int i = 0; i < 3; ++i) { texcoords[i] = TexCoord(0.5f, 0.5f); } } } // 在执行接缝均衡前 DEBUG_EXTRA("=== Starting seam leveling phase ==="); // 确保组件映射有效 if (texturePatches.size() > 2) { DEBUG_EXTRA("Preparing for seam leveling"); // 第一步:确保组件映射有效 DEBUG_EXTRA("Fixing component mappings..."); FixComponentMappingsOnceAndForAll(); // 第二步:重新初始化接缝数据 DEBUG_EXTRA("Reinitializing seam data..."); ReinitializeSeamData(); // 第三步:验证数据一致性 if (ValidateSeamDataConsistency()) { DEBUG_EXTRA("Seam data validation passed"); // 第四步:清理接缝边 DEBUG_EXTRA("Cleaning seam edges..."); CleanSeamEdgesComprehensive(); // 第五步:创建接缝顶点 DEBUG_EXTRA("Creating seam vertices..."); CreateSeamVertices(); if (bGlobalSeamLeveling) { DEBUG_EXTRA("Starting global seam leveling"); GlobalSeamLevelingEnhanced(); } if (bLocalSeamLeveling) { DEBUG_EXTRA("Starting local seam leveling"); // 在局部接缝均衡前进行额外的安全检查 DEBUG_EXTRA("Performing safety checks before local seam leveling..."); // 检查faceTexcoords数组 - 使用更安全的方法 if (faceTexcoords.GetSize() != faces.GetSize() * 3) { DEBUG_EXTRA("WARNING: faceTexcoords size mismatch: %u (expected %u). 
Fixing...", faceTexcoords.GetSize(), faces.GetSize() * 3); // 创建临时数组 TexCoord* tempData = new TexCoord[faces.GetSize() * 3]; // 复制现有数据 uint32_t copySize = std::min(faceTexcoords.GetSize(), faces.GetSize() * 3); for (uint32_t i = 0; i < copySize; ++i) { tempData[i] = faceTexcoords[i]; } // 用默认值填充剩余部分 for (uint32_t i = copySize; i < faces.GetSize() * 3; ++i) { tempData[i] = TexCoord(0.5f, 0.5f); } // 重新分配数组 faceTexcoords.Resize(faces.GetSize() * 3); for (uint32_t i = 0; i < faces.GetSize() * 3; ++i) { faceTexcoords[i] = tempData[i]; } // 清理临时数组 delete[] tempData; DEBUG_EXTRA("Fixed faceTexcoords size to %u", faceTexcoords.GetSize()); } // 验证数据结构完整性 if (!ValidateSeamDataForLeveling()) { DEBUG_EXTRA("ERROR: Seam data validation failed. Skipping local seam leveling."); } else { // 应用局部接缝均衡 LocalSeamLevelingEnhanced(); } } } else { DEBUG_EXTRA("WARNING: Seam data validation failed. Skipping seam leveling."); } } // 7. 合并重叠的纹理块 DEBUG_EXTRA("Merging overlapping patches"); MergeOverlappingPatches(faceTexcoords2); // 8. 打包纹理块 DEBUG_EXTRA("Packing texture atlases"); std::vector generatedTextures; PackTextureAtlases(faceTexcoords2, faceTexindices2, generatedTextures, nTextureSizeMultiple, nRectPackingHeuristic, colEmpty, maxTextureSize); // 9. 高质量纹理采样 DEBUG_EXTRA("Generating high-quality texture"); GenerateHighQualityTexture(generatedTextures, faceTexcoords2, faceTexindices2, fSharpnessWeight, colEmpty); // 10. 应用纹理锐化 if (fSharpnessWeight > 0) { DEBUG_EXTRA("Applying adaptive sharpening"); // ApplyAdaptiveSharpening(generatedTextures, fSharpnessWeight); } // 11. 填充空洞 DEBUG_EXTRA("Filling texture holes"); // FillTextureHoles(generatedTextures, colEmpty); // 12. 
保存结果 scene.mesh.texturesDiffuse = std::move(generatedTextures); scene.mesh.faceTexindices = std::move(faceTexindices2); DEBUG_EXTRA("Texture generation with view consistency completed in %s", TD_TIMER_GET_FMT().c_str()); return true; } // 重新为面分配组件ID的函数 bool MeshTexture::ReassignComponentForFace(FIndex faceIdx) { if (faceIdx >= faces.GetSize()) { return false; } if (faceIdx >= components.size()) { return false; } uint32_t currentCompID = components[faceIdx]; if (currentCompID != NO_ID && currentCompID < mapIdxPatch.GetSize()) { uint32_t currentPatch = mapIdxPatch[currentCompID]; if (currentPatch < texturePatches.size()) { return true; // 已经是有效的组件 } } // 找到面的相邻面,看它们属于哪个组件 std::unordered_map neighborComponentCounts; const Face& face = faces[faceIdx]; for (int i = 0; i < 3; ++i) { VIndex vertexIdx = face[i]; if (vertexIdx >= scene.mesh.vertexFaces.size()) continue; const Mesh::FaceIdxArr& adjacentFaces = scene.mesh.vertexFaces[vertexIdx]; for (FIndex adjFace : adjacentFaces) { if (adjFace != faceIdx && adjFace < components.size()) { uint32_t compID = components[adjFace]; if (compID != NO_ID && compID < mapIdxPatch.GetSize()) { neighborComponentCounts[compID]++; } } } } // 选择最常见的相邻组件 uint32_t bestCompID = NO_ID; int maxCount = 0; for (const auto& pair : neighborComponentCounts) { if (pair.second > maxCount) { maxCount = pair.second; bestCompID = pair.first; } } if (bestCompID != NO_ID) { components[faceIdx] = bestCompID; return true; } return false; } // 重新为面分配纹理块的函数 bool MeshTexture::ReassignFaceToCorrectPatch(FIndex faceIdx) { if (faceIdx >= faces.GetSize()) { return false; } if (faceIdx >= components.size()) { return false; } uint32_t compID = components[faceIdx]; if (compID == NO_ID) { // 如果没有组件ID,尝试找到最近的纹理块 uint32_t patchIdx = FindNearestPatchForFaces({faceIdx}); if (patchIdx != NO_ID) { // 为这个面创建一个新的组件 uint32_t newCompID = static_cast(components.size()); components[faceIdx] = newCompID; // 扩展mapIdxPatch数组 if (newCompID >= mapIdxPatch.GetSize()) { 
mapIdxPatch.Resize(newCompID + 1); } mapIdxPatch[newCompID] = patchIdx; return true; } return false; } // 检查当前组件是否映射到有效的纹理块 if (compID >= mapIdxPatch.GetSize()) { return false; } uint32_t currentPatch = mapIdxPatch[compID]; if (currentPatch < texturePatches.size()) { return true; // 已经有效 } // 找到最近的纹理块 uint32_t nearestPatch = FindNearestPatchForFaces({faceIdx}); if (nearestPatch != NO_ID) { mapIdxPatch[compID] = nearestPatch; return true; } return false; } void MeshTexture::CleanSeamEdgesComprehensive() { DEBUG_EXTRA("Cleaning seam edges - simplified version"); PairIdxArr validSeamEdges; validSeamEdges.Reserve(seamEdges.GetSize()); int validCount = 0; int invalidCount = 0; int samePatchCount = 0; for (uint32_t edgeIdx = 0; edgeIdx < seamEdges.GetSize(); ++edgeIdx) { const PairIdx& edge = seamEdges[edgeIdx]; // 基本有效性检查 if (edge.i >= faces.GetSize() || edge.j >= faces.GetSize()) { invalidCount++; continue; } if (edge.i >= components.GetSize() || edge.j >= components.GetSize()) { invalidCount++; continue; } uint32_t comp0 = components[edge.i]; uint32_t comp1 = components[edge.j]; if (comp0 == NO_ID || comp1 == NO_ID || comp0 >= mapIdxPatch.GetSize() || comp1 >= mapIdxPatch.GetSize()) { invalidCount++; continue; } uint32_t patch0 = mapIdxPatch[comp0]; uint32_t patch1 = mapIdxPatch[comp1]; if (patch0 >= texturePatches.size() || patch1 >= texturePatches.size()) { invalidCount++; continue; } if (patch0 == patch1) { samePatchCount++; continue; } // 这是有效的接缝边 validSeamEdges.push_back(edge); validCount++; } seamEdges = validSeamEdges; DEBUG_EXTRA("Seam edges cleaned: %d valid, %d same patch, %d invalid", validCount, samePatchCount, invalidCount); } uint32_t MeshTexture::FindOrCreateComponentForFace(FIndex faceIdx) { if (faceIdx >= faces.GetSize()) { return NO_ID; } // 首先检查是否已经有组件 if (faceIdx < components.GetSize() && components[faceIdx] != NO_ID) { return components[faceIdx]; } // 查找相邻面的组件 std::unordered_map neighborComponentCounts; const Face& face = faces[faceIdx]; for (int 
i = 0; i < 3; ++i) { VIndex vertexIdx = face[i]; if (vertexIdx >= scene.mesh.vertexFaces.size()) { continue; } const Mesh::FaceIdxArr& adjacentFaces = scene.mesh.vertexFaces[vertexIdx]; for (FIndex adjFace : adjacentFaces) { if (adjFace != faceIdx && adjFace < components.GetSize()) { uint32_t compID = components[adjFace]; if (compID != NO_ID) { neighborComponentCounts[compID]++; } } } } // 如果有相邻组件,使用最常见的那个 if (!neighborComponentCounts.empty()) { uint32_t bestCompID = NO_ID; int maxCount = 0; for (const auto& pair : neighborComponentCounts) { if (pair.second > maxCount) { maxCount = pair.second; bestCompID = pair.first; } } if (bestCompID != NO_ID) { return bestCompID; } } // 没有相邻组件,创建新的 uint32_t newCompID = static_cast(components.GetSize()); // 确保components数组足够大 if (faceIdx >= components.GetSize()) { uint32_t oldSize = components.GetSize(); // 使用Resize而不是resize components.Resize(faceIdx + 1); // 初始化新添加的元素为NO_ID for (uint32_t i = oldSize; i < components.GetSize(); ++i) { components[i] = NO_ID; } } components[faceIdx] = newCompID; // 确保mapIdxPatch数组足够大 if (newCompID >= mapIdxPatch.GetSize()) { uint32_t oldSize = mapIdxPatch.GetSize(); mapIdxPatch.Resize(newCompID + 1); // 初始化新添加的元素为NO_ID for (uint32_t i = oldSize; i < mapIdxPatch.GetSize(); ++i) { mapIdxPatch[i] = NO_ID; } } return newCompID; } uint32_t MeshTexture::FindNearestPatchForComponent(uint32_t compID) { if (compID == NO_ID) { return NO_ID; } // 收集属于这个组件的所有面 std::vector componentFaces; for (FIndex faceIdx = 0; faceIdx < components.size(); ++faceIdx) { if (components[faceIdx] == compID) { componentFaces.push_back(faceIdx); } } if (componentFaces.empty()) { return NO_ID; } return FindNearestPatchForFaces(componentFaces); } bool MeshTexture::ValidateSeamDataConsistency() { DEBUG_EXTRA("Validating seam data consistency..."); bool valid = true; // 1. 
验证components数组 if (components.size() != faces.GetSize()) { DEBUG_EXTRA("ERROR: components size (%zu) doesn't match faces size (%u)", components.size(), faces.GetSize()); return false; } // 2. 验证每个面都有组件ID int orphanFaces = 0; for (FIndex faceIdx = 0; faceIdx < faces.GetSize(); ++faceIdx) { if (components[faceIdx] == NO_ID) { orphanFaces++; } } if (orphanFaces > 0) { DEBUG_EXTRA("WARNING: %d faces have no component ID", orphanFaces); } // 3. 验证mapIdxPatch数组 for (uint32_t compID = 0; compID < mapIdxPatch.GetSize(); ++compID) { uint32_t patchIdx = mapIdxPatch[compID]; if (patchIdx >= texturePatches.size()) { DEBUG_EXTRA("ERROR: Component %u maps to invalid patch %u", compID, patchIdx); valid = false; } } // 4. 验证纹理块 for (size_t patchIdx = 0; patchIdx < texturePatches.size(); ++patchIdx) { const TexturePatch& patch = texturePatches[patchIdx]; for (uint32_t i = 0; i < patch.faces.GetSize(); ++i) { FIndex fid = patch.faces[i]; if (fid >= faces.GetSize()) { DEBUG_EXTRA("ERROR: Patch %zu contains invalid face index %u", patchIdx, fid); valid = false; } else if (fid >= components.size()) { DEBUG_EXTRA("ERROR: Patch %zu face %u out of components range", patchIdx, fid); valid = false; } } } if (valid) { DEBUG_EXTRA("Seam data consistency validation passed"); } else { DEBUG_EXTRA("Seam data consistency validation failed"); } return valid; } void MeshTexture::ReinitializeSeamData() { DEBUG_EXTRA("Reinitializing seam data (simplified)..."); // 1. 确保组件数组大小正确 if (components.GetSize() != faces.GetSize()) { DEBUG_EXTRA("Resizing components array from %u to %u", components.GetSize(), faces.GetSize()); components.Resize(faces.GetSize()); } // 2. 初始化所有组件为NO_ID for (uint32_t i = 0; i < components.GetSize(); ++i) { components[i] = NO_ID; } // 3. 
从纹理块分配组件ID uint32_t nextCompID = 0; for (size_t patchIdx = 0; patchIdx < texturePatches.size(); ++patchIdx) { const TexturePatch& patch = texturePatches[patchIdx]; // 跳过空纹理块 if (patch.faces.IsEmpty()) { continue; } // 为这个纹理块的所有面分配同一个组件ID for (uint32_t i = 0; i < patch.faces.GetSize(); ++i) { FIndex faceIdx = patch.faces[i]; if (faceIdx < components.GetSize()) { components[faceIdx] = nextCompID; } } nextCompID++; } DEBUG_EXTRA("Assigned %u components from %zu patches", nextCompID, texturePatches.size()); // 4. 处理没有组件的面 int orphanFaces = 0; for (FIndex faceIdx = 0; faceIdx < faces.GetSize(); ++faceIdx) { if (components[faceIdx] == NO_ID) { orphanFaces++; } } if (orphanFaces > 0) { DEBUG_EXTRA("Found %d faces without components. Assigning them...", orphanFaces); // 为孤立面分配组件 for (FIndex faceIdx = 0; faceIdx < faces.GetSize(); ++faceIdx) { if (components[faceIdx] != NO_ID) { continue; } // 查找相邻面的组件 uint32_t neighborComp = NO_ID; const Face& face = faces[faceIdx]; for (int i = 0; i < 3; ++i) { VIndex vertexIdx = face[i]; if (vertexIdx >= scene.mesh.vertexFaces.size()) { continue; } const Mesh::FaceIdxArr& adjacentFaces = scene.mesh.vertexFaces[vertexIdx]; for (FIndex adjFace : adjacentFaces) { if (adjFace != faceIdx && adjFace < components.GetSize()) { if (components[adjFace] != NO_ID) { neighborComp = components[adjFace]; break; } } } if (neighborComp != NO_ID) { break; } } if (neighborComp != NO_ID) { components[faceIdx] = neighborComp; } else { // 创建一个新组件 components[faceIdx] = nextCompID; nextCompID++; } } } // 5. 创建组件到纹理块的映射 mapIdxPatch.Resize(nextCompID); for (uint32_t compID = 0; compID < nextCompID; ++compID) { mapIdxPatch[compID] = NO_ID; // 初始化为NO_ID } // 6. 
为每个组件找到对应的纹理块 std::vector> compPatchVotes(nextCompID); for (FIndex faceIdx = 0; faceIdx < faces.GetSize(); ++faceIdx) { if (faceIdx >= components.GetSize()) { continue; } uint32_t compID = components[faceIdx]; // 查找这个面属于哪个纹理块 uint32_t facePatch = NO_ID; for (size_t patchIdx = 0; patchIdx < texturePatches.size(); ++patchIdx) { const TexturePatch& patch = texturePatches[patchIdx]; for (uint32_t i = 0; i < patch.faces.GetSize(); ++i) { if (patch.faces[i] == faceIdx) { facePatch = static_cast(patchIdx); break; } } if (facePatch != NO_ID) { break; } } if (facePatch != NO_ID && compID < compPatchVotes.size()) { compPatchVotes[compID][facePatch]++; } } // 7. 为每个组件分配票数最多的纹理块 for (uint32_t compID = 0; compID < nextCompID; ++compID) { const auto& votes = compPatchVotes[compID]; if (!votes.empty()) { uint32_t bestPatch = NO_ID; int maxVotes = 0; for (const auto& vote : votes) { if (vote.second > maxVotes) { maxVotes = vote.second; bestPatch = vote.first; } } if (bestPatch != NO_ID) { mapIdxPatch[compID] = bestPatch; } } // 如果还是NO_ID,使用默认值0 if (mapIdxPatch[compID] == NO_ID) { mapIdxPatch[compID] = 0; } } DEBUG_EXTRA("Seam data reinitialized: %u components, %u valid mappings", nextCompID, mapIdxPatch.GetSize()); } void MeshTexture::AssignComponentsToOrphanFaces() { DEBUG_EXTRA("Assigning components to orphan faces - improved version"); int assignedFaces = 0; // 使用队列处理孤立面 std::vector orphanFaces; for (FIndex faceIdx = 0; faceIdx < faces.GetSize(); ++faceIdx) { if (faceIdx >= components.GetSize() || components[faceIdx] == NO_ID) { orphanFaces.push_back(faceIdx); } } DEBUG_EXTRA("Found %zu orphan faces", orphanFaces.size()); // 多次迭代,直到没有变化 bool changed = true; int iteration = 0; const int MAX_ITERATIONS = 10; while (changed && iteration < MAX_ITERATIONS && !orphanFaces.empty()) { changed = false; iteration++; std::vector newOrphans; for (FIndex faceIdx : orphanFaces) { // 查找所有相邻面的组件 std::unordered_map neighborComponentCounts; std::unordered_map componentToPatch; // 组件到纹理块的映射 const 
Face& face = faces[faceIdx]; for (int i = 0; i < 3; ++i) { VIndex vertexIdx = face[i]; if (vertexIdx >= scene.mesh.vertexFaces.size()) { continue; } const Mesh::FaceIdxArr& adjacentFaces = scene.mesh.vertexFaces[vertexIdx]; for (FIndex adjFace : adjacentFaces) { if (adjFace != faceIdx && adjFace < components.GetSize()) { uint32_t compID = components[adjFace]; if (compID != NO_ID) { neighborComponentCounts[compID]++; // 记录组件到纹理块的映射 if (compID < mapIdxPatch.GetSize()) { componentToPatch[compID] = mapIdxPatch[compID]; } } } } } // 选择有效的组件(有有效纹理块映射的) uint32_t bestCompID = NO_ID; int maxCount = 0; for (const auto& pair : neighborComponentCounts) { uint32_t compID = pair.first; int count = pair.second; // 检查组件是否有有效的纹理块映射 auto it = componentToPatch.find(compID); if (it != componentToPatch.end()) { uint32_t patchIdx = it->second; if (patchIdx != NO_ID && patchIdx < texturePatches.size() - 1) { if (count > maxCount) { maxCount = count; bestCompID = compID; } } } } if (bestCompID != NO_ID) { // 确保components数组足够大 if (faceIdx >= components.GetSize()) { uint32_t oldSize = components.GetSize(); components.Resize(faceIdx + 1); for (uint32_t i = oldSize; i < components.GetSize(); ++i) { components[i] = NO_ID; } } components[faceIdx] = bestCompID; assignedFaces++; changed = true; } else { newOrphans.push_back(faceIdx); } } orphanFaces = std::move(newOrphans); } // 处理剩余的孤立面 if (!orphanFaces.empty()) { DEBUG_EXTRA(" %zu faces remain orphaned after %d iterations. 
Grouping them...", orphanFaces.size(), iteration); // 将剩余的孤立面分组 std::vector> orphanGroups; std::unordered_set processed; for (FIndex faceIdx : orphanFaces) { if (processed.find(faceIdx) != processed.end()) { continue; } std::vector currentGroup; std::queue toProcess; toProcess.push(faceIdx); processed.insert(faceIdx); while (!toProcess.empty()) { FIndex current = toProcess.front(); toProcess.pop(); currentGroup.push_back(current); // 查找相邻的孤立面 const Face& face = faces[current]; for (int i = 0; i < 3; ++i) { VIndex vertexIdx = face[i]; if (vertexIdx >= scene.mesh.vertexFaces.size()) { continue; } const Mesh::FaceIdxArr& adjacentFaces = scene.mesh.vertexFaces[vertexIdx]; for (FIndex adjFace : adjacentFaces) { if (adjFace != current && std::find(orphanFaces.begin(), orphanFaces.end(), adjFace) != orphanFaces.end() && processed.find(adjFace) == processed.end()) { toProcess.push(adjFace); processed.insert(adjFace); } } } } if (!currentGroup.empty()) { orphanGroups.push_back(currentGroup); } } // 为每个组创建一个组件 for (const auto& group : orphanGroups) { if (group.empty()) { continue; } // 为这个组找到最合适的纹理块 uint32_t bestPatch = FindNearestPatchForFaces(group); if (bestPatch == NO_ID || bestPatch >= texturePatches.size() - 1) { // 无法找到合适的纹理块,使用默认的第一个纹理块 bestPatch = 0; } // 创建新组件 uint32_t newCompID = static_cast(components.GetSize()); // 确保mapIdxPatch数组足够大 if (newCompID >= mapIdxPatch.GetSize()) { uint32_t oldSize = mapIdxPatch.GetSize(); mapIdxPatch.Resize(newCompID + 1); for (uint32_t i = oldSize; i < mapIdxPatch.GetSize(); ++i) { mapIdxPatch[i] = NO_ID; } } // 设置映射 mapIdxPatch[newCompID] = bestPatch; // 设置每个面的组件ID for (FIndex faceIdx : group) { if (faceIdx >= components.GetSize()) { uint32_t oldSize = components.GetSize(); components.Resize(faceIdx + 1); for (uint32_t i = oldSize; i < components.GetSize(); ++i) { components[i] = NO_ID; } } components[faceIdx] = newCompID; assignedFaces++; } } } DEBUG_EXTRA(" Assigned components to %d orphan faces", assignedFaces); } // 视图一致性选择函数 
void MeshTexture::SelectOptimalViewsWithConsistency( std::vector& faceViewData, int minPatchSize) { DEBUG_EXTRA("Selecting optimal views with consistency"); const int numFaces = (int)faces.size(); const float consistencyWeight = 0.3f; // 一致性权重 // 步骤1: 计算每个面的候选视图质量 std::vector>> faceCandidates(numFaces); #pragma omp parallel for schedule(dynamic) for (int fid = 0; fid < numFaces; ++fid) { const FIndex idxFace = (FIndex)fid; const Face& face = faces[idxFace]; // 计算候选视图 std::vector> candidates; for (int viewID = 0; viewID < (int)images.size(); ++viewID) { if (IsFaceVisibleFromView(idxFace, viewID)) { float quality = ComputeViewQuality(idxFace, viewID); candidates.emplace_back(viewID, quality); } } // 按质量排序 std::sort(candidates.begin(), candidates.end(), [](const auto& a, const auto& b) { return a.second > b.second; }); faceCandidates[fid] = std::move(candidates); } // 步骤2: 迭代优化视图选择,考虑相邻面一致性 std::vector selectedViews(numFaces, -1); std::vector viewScores(numFaces, 0.0f); // 初始化:选择质量最高的视图 #pragma omp parallel for schedule(static) for (int fid = 0; fid < numFaces; ++fid) { if (!faceCandidates[fid].empty()) { selectedViews[fid] = faceCandidates[fid][0].first; viewScores[fid] = faceCandidates[fid][0].second; } } // 构建顶点到面的映射,用于快速查找相邻面 std::vector> vertexToFaces(vertices.size()); for (FIndex fid = 0; fid < (FIndex)faces.size(); ++fid) { const Face& face = faces[fid]; for (int i = 0; i < 3; ++i) { vertexToFaces[face[i]].push_back(fid); } } // 迭代优化 for (int iteration = 0; iteration < 5; ++iteration) { int changes = 0; #pragma omp parallel for schedule(dynamic) reduction(+:changes) for (int fid = 0; fid < numFaces; ++fid) { if (faceCandidates[fid].empty()) continue; const Face& face = faces[fid]; std::vector neighborViews; // 通过共享顶点收集相邻面的视图 std::unordered_set processedNeighbors; for (int i = 0; i < 3; ++i) { const VIndex vertexIdx = face[i]; const std::vector& adjacentFaces = vertexToFaces[vertexIdx]; for (FIndex neighborFid : adjacentFaces) { // 跳过自己 if (neighborFid == 
(FIndex)fid) continue; // 强制类型转换 // 确保是真正的相邻面(共享边) const Face& neighborFace = faces[neighborFid]; bool sharesEdge = false; // 检查两个面是否共享一条边 for (int j = 0; j < 3 && !sharesEdge; ++j) { VIndex v1 = neighborFace[j]; VIndex v2 = neighborFace[(j + 1) % 3]; // 检查边 (v1, v2) 是否在当前面中 for (int k = 0; k < 3; ++k) { VIndex w1 = face[k]; VIndex w2 = face[(k + 1) % 3]; if ((v1 == w1 && v2 == w2) || (v1 == w2 && v2 == w1)) { sharesEdge = true; break; } } } if (sharesEdge && selectedViews[neighborFid] != -1) { if (processedNeighbors.find(neighborFid) == processedNeighbors.end()) { neighborViews.push_back(selectedViews[neighborFid]); processedNeighbors.insert(neighborFid); } } } } if (neighborViews.empty()) continue; // 计算相邻面中最常见的视图 std::unordered_map viewCounts; for (int viewID : neighborViews) { viewCounts[viewID]++; } int mostCommonView = -1; int maxCount = 0; for (const auto& pair : viewCounts) { if (pair.second > maxCount) { maxCount = pair.second; mostCommonView = pair.first; } } // 重新计算视图得分,考虑一致性 int bestView = selectedViews[fid]; float bestScore = viewScores[fid]; for (const auto& candidate : faceCandidates[fid]) { int viewID = candidate.first; float quality = candidate.second; // 计算一致性得分 float consistency = 0.0f; if (viewID == mostCommonView) { consistency = 0.5f * (static_cast(maxCount) / neighborViews.size()); } // 综合得分 float totalScore = (1.0f - consistencyWeight) * quality + consistencyWeight * consistency; if (totalScore > bestScore) { bestScore = totalScore; bestView = viewID; } } if (bestView != selectedViews[fid]) { selectedViews[fid] = bestView; viewScores[fid] = bestScore; changes++; } } DEBUG_EXTRA("View selection iteration %d: %d changes", iteration + 1, changes); if (changes == 0) break; } // 步骤3: 确保每个视图块至少有minPatchSize个面 std::unordered_map> viewToFaces; for (FIndex fid = 0; fid < (FIndex)selectedViews.size(); ++fid) { int viewID = selectedViews[fid]; if (viewID != -1) { viewToFaces[viewID].push_back(fid); } } // 移除过小的视图块 for (const auto& pair : viewToFaces) { 
if ((int)pair.second.size() < minPatchSize) { // 重新分配这些面 for (FIndex fid : pair.second) { // 寻找相邻面中最常见的视图 std::unordered_map neighborViewCounts; const Face& face = faces[fid]; for (int i = 0; i < 3; ++i) { const VIndex vertexIdx = face[i]; const std::vector& adjacentFaces = vertexToFaces[vertexIdx]; for (FIndex neighborFid : adjacentFaces) { if (neighborFid != fid && selectedViews[neighborFid] != -1) { neighborViewCounts[selectedViews[neighborFid]]++; } } } // 选择最常见的相邻视图 int bestNeighborView = -1; int maxNeighborCount = 0; for (const auto& countPair : neighborViewCounts) { if (countPair.second > maxNeighborCount) { maxNeighborCount = countPair.second; bestNeighborView = countPair.first; } } if (bestNeighborView != -1) { selectedViews[fid] = bestNeighborView; } else { // 如果没有合适的相邻视图,选择质量最高的候选视图 if (!faceCandidates[fid].empty()) { selectedViews[fid] = faceCandidates[fid][0].first; } } } } } // 保存结果 #pragma omp parallel for schedule(static) for (int fid = 0; fid < numFaces; ++fid) { faceViewData[fid].viewID = selectedViews[fid]; faceViewData[fid].quality = viewScores[fid]; } DEBUG_EXTRA("View selection with consistency completed"); } // 创建一致性纹理块 void MeshTexture::CreateConsistentTexturePatches( const std::vector& faceViewData, std::vector& patchAssignments, int minPatchSize) { DEBUG_EXTRA("Creating consistent texture patches"); const int numFaces = static_cast(faces.size()); std::vector visited(numFaces, false); int patchCounter = 0; // 清空现有的纹理块 texturePatches.clear(); // 修正: Clear() -> clear() // 步骤0: 构建边到面的映射 std::unordered_map> edgeToFaceMap; for (int fid = 0; fid < numFaces; ++fid) { const Face& face = faces[fid]; for (int i = 0; i < 3; ++i) { VIndex v0 = face[i]; VIndex v1 = face[(i + 1) % 3]; if (v0 > v1) std::swap(v0, v1); uint64_t edgeKey = (static_cast(v0) << 32) | v1; edgeToFaceMap[edgeKey].push_back(static_cast(fid)); } } // 步骤1: 创建基于视图的纹理块 std::vector> patchFaces; for (int startFace = 0; startFace < numFaces; ++startFace) { if (visited[startFace] || 
faceViewData[startFace].viewID == -1) { continue; } int viewID = faceViewData[startFace].viewID; std::vector currentPatch; std::queue faceQueue; faceQueue.push(startFace); visited[startFace] = true; while (!faceQueue.empty()) { int fid = faceQueue.front(); faceQueue.pop(); currentPatch.push_back(static_cast(fid)); patchAssignments[fid] = patchCounter; const Face& face = faces[fid]; // 查找相同视图的相邻面 for (int i = 0; i < 3; ++i) { VIndex idxV0 = face[i]; VIndex idxV1 = face[(i + 1) % 3]; if (idxV0 > idxV1) std::swap(idxV0, idxV1); uint64_t edgeKey = (static_cast(idxV0) << 32) | idxV1; auto it = edgeToFaceMap.find(edgeKey); if (it != edgeToFaceMap.end()) { for (FIndex neighborFid : it->second) { int nfid = static_cast(neighborFid); if (nfid == fid) continue; if (visited[nfid]) continue; if (faceViewData[nfid].viewID == viewID) { visited[nfid] = true; faceQueue.push(nfid); } } } } } // 只保留足够大的纹理块 if (static_cast(currentPatch.size()) >= minPatchSize) { patchFaces.push_back(std::move(currentPatch)); patchCounter++; } else { // 小纹理块重新标记为未分配 for (FIndex fid : currentPatch) { patchAssignments[static_cast(fid)] = -1; } } } DEBUG_EXTRA("Initial patch creation: %zu patches", patchFaces.size()); // 步骤2: 处理未分配的面 std::vector unassignedFaces; for (int fid = 0; fid < numFaces; ++fid) { if (patchAssignments[fid] == -1 && faceViewData[fid].viewID != -1) { unassignedFaces.push_back(fid); } } DEBUG_EXTRA("Found %zu unassigned faces", unassignedFaces.size()); if (!unassignedFaces.empty()) { // 构建相邻面映射 std::vector> faceNeighbors(numFaces); for (int fid = 0; fid < numFaces; ++fid) { const Face& face = faces[fid]; for (int i = 0; i < 3; ++i) { VIndex idxV0 = face[i]; VIndex idxV1 = face[(i + 1) % 3]; if (idxV0 > idxV1) std::swap(idxV0, idxV1); uint64_t edgeKey = (static_cast(idxV0) << 32) | idxV1; auto it = edgeToFaceMap.find(edgeKey); if (it != edgeToFaceMap.end()) { for (FIndex neighborFid : it->second) { int nfid = static_cast(neighborFid); if (nfid != fid) { 
faceNeighbors[fid].push_back(nfid); } } } } } // 为未分配的面找到最近的纹理块 std::vector faceDistances(numFaces, -1); std::queue bfsQueue; // 初始化BFS - 从已分配的面开始 for (int fid = 0; fid < numFaces; ++fid) { if (patchAssignments[fid] != -1) { bfsQueue.push(fid); faceDistances[fid] = 0; } } // 执行BFS std::vector> assignments; while (!bfsQueue.empty()) { int fid = bfsQueue.front(); bfsQueue.pop(); int currentPatch = patchAssignments[fid]; int currentDist = faceDistances[fid]; // 检查currentPatch是否有效 if (currentPatch < 0 || currentPatch >= (int)patchFaces.size()) { DEBUG_EXTRA("Warning: Invalid patch ID %d for face %d", currentPatch, fid); continue; } for (int neighborFid : faceNeighbors[fid]) { if (neighborFid < 0 || neighborFid >= numFaces) { DEBUG_EXTRA("Warning: Invalid neighbor face ID %d for face %d", neighborFid, fid); continue; } if (faceDistances[neighborFid] == -1) { faceDistances[neighborFid] = currentDist + 1; bfsQueue.push(neighborFid); // 如果这个面是未分配的,将其分配到当前面的纹理块 if (patchAssignments[neighborFid] == -1) { patchAssignments[neighborFid] = currentPatch; assignments.push_back(std::make_pair(neighborFid, currentPatch)); } } } } // 将分配的面加入到对应的纹理块 for (const auto& assignment : assignments) { int fid = assignment.first; int patchID = assignment.second; if (fid < 0 || fid >= numFaces) { DEBUG_EXTRA("Warning: Invalid face ID in assignment: %d", fid); continue; } if (patchID < 0 || patchID >= (int)patchFaces.size()) { DEBUG_EXTRA("Warning: Invalid patch ID in assignment: %d for face %d", patchID, fid); continue; } patchFaces[patchID].push_back(static_cast(fid)); } } // 步骤3: 创建纹理块 DEBUG_EXTRA("Creating texture patches structure with %zu patches", patchFaces.size()); // 先计算有效纹理块的数量 size_t validPatches = 0; for (const auto& patch : patchFaces) { if (!patch.empty()) { validPatches++; } } texturePatches.resize(validPatches + 1); // 修正: Resize() -> resize() DEBUG_EXTRA("Allocated %zu texture patches (including invalid patch)", texturePatches.size()); size_t patchIdx = 0; for (size_t patchID = 
0; patchID < patchFaces.size(); ++patchID) { if (patchFaces[patchID].empty()) { DEBUG_EXTRA("Skipping empty patch %zu", patchID); continue; } TexturePatch& patch = texturePatches[patchIdx]; // 复制面索引 patch.faces.resize(patchFaces[patchID].size()); // 修正: Resize() -> resize() for (size_t i = 0; i < patchFaces[patchID].size(); ++i) { patch.faces[i] = patchFaces[patchID][i]; } // 确定纹理块的视图 std::unordered_map viewCounts; for (FIndex fid : patch.faces) { int viewID = faceViewData[static_cast(fid)].viewID; if (viewID != -1) { viewCounts[viewID]++; } } int dominantView = -1; int maxCount = 0; for (const auto& pair : viewCounts) { if (pair.second > maxCount) { maxCount = pair.second; dominantView = pair.first; } } patch.label = dominantView; DEBUG_EXTRA("Patch %zu: %zu faces, label: %d", patchIdx, patch.faces.size(), patch.label); patchIdx++; } // 步骤4: 创建无效面纹理块 if (!texturePatches.empty()) { TexturePatch& invalidPatch = texturePatches.back(); invalidPatch.label = NO_ID; invalidPatch.faces.clear(); // 清空数组 for (int fid = 0; fid < numFaces; ++fid) { if (faceViewData[fid].viewID == -1) { invalidPatch.faces.push_back(static_cast(fid)); // 修正: Insert -> push_back } } DEBUG_EXTRA("Invalid patch: %zu faces", invalidPatch.faces.size()); } DEBUG_EXTRA("Created %zu texture patches", texturePatches.size()); } // 高质量纹理生成 void MeshTexture::GenerateHighQualityTexture( std::vector& textures, const Mesh::TexCoordArr& faceTexcoords, const Mesh::TexIndexArr& faceTexindices, float fSharpnessWeight, Pixel8U colEmpty) { DEBUG_EXTRA("Generating high-quality texture with view consistency"); for (size_t texID = 0; texID < textures.size(); ++texID) { Image8U3& texture = textures[texID]; int texWidth = texture.cols; int texHeight = texture.rows; // 为每个像素跟踪采样信息 cv::Mat1f weightAccum(texHeight, texWidth, 0.0f); cv::Mat1i viewAccum(texHeight, texWidth, -1); cv::Mat3f colorAccum(texHeight, texWidth, cv::Vec3f(0, 0, 0)); // 第一次遍历:从最优视图采样 #ifdef TEXOPT_USE_OPENMP #pragma omp parallel for schedule(dynamic) 
#endif for (int_t idx = 0; idx < (int_t)texturePatches.size() - 1; ++idx) { const TexturePatch& texturePatch = texturePatches[idx]; if (texturePatch.label == NO_ID) continue; const Image& imageData = images[texturePatch.label]; const cv::Mat& sourceImage = imageData.image; for (FIndex idxFace : texturePatch.faces) { if (faceTexindices[idxFace] != texID) continue; const TexCoord* texcoords = &faceTexcoords[idxFace * 3]; const Face& face = faces[idxFace]; // 计算纹理三角形边界 AABB2f uvBounds(true); for (int i = 0; i < 3; ++i) { uvBounds.InsertFull(texcoords[i]); } int startX = std::max(0, (int)floor(uvBounds.ptMin.x())); int startY = std::max(0, (int)floor(uvBounds.ptMin.y())); int endX = std::min(texWidth - 1, (int)ceil(uvBounds.ptMax.x())); int endY = std::min(texHeight - 1, (int)ceil(uvBounds.ptMax.y())); // 遍历纹理三角形内的所有像素 for (int y = startY; y <= endY; ++y) { for (int x = startX; x <= endX; ++x) { Point2f texCoord(x + 0.5f, y + 0.5f); // 检查像素是否在三角形内 Point3f barycentric; if (!PointInTriangle(texCoord, texcoords[0], texcoords[1], texcoords[2], barycentric)) { continue; } // 计算3D坐标 Vertex worldPoint = vertices[face[0]] * barycentric.x + vertices[face[1]] * barycentric.y + vertices[face[2]] * barycentric.z; // 投影到源图像 Point2f imgPoint = imageData.camera.ProjectPointP(worldPoint); if (imgPoint.x < 0 || imgPoint.x >= sourceImage.cols || imgPoint.y < 0 || imgPoint.y >= sourceImage.rows) { continue; } // 高质量采样(双线性插值 + 各向异性过滤) cv::Vec3f color = SampleHighQuality(sourceImage, imgPoint); #ifdef TEXOPT_USE_OPENMP #pragma omp critical #endif { // 累积颜色和权重 colorAccum(y, x) += cv::Vec3f(color[2], color[1], color[0]); // BGR -> RGB weightAccum(y, x) += 1.0f; viewAccum(y, x) = texturePatch.label; // 记录主视图 } } } } } // 第二次遍历:填充缺失像素,尽量从相同视图采样 FillMissingPixelsWithViewConsistency(texture, colorAccum, weightAccum, viewAccum, colEmpty); } } // 高质量采样函数 cv::Vec3f MeshTexture::SampleHighQuality(const cv::Mat& image, const Point2f& point) { int x0 = (int)floor(point.x); int y0 = 
(int)floor(point.y); int x1 = std::min(x0 + 1, image.cols - 1); int y1 = std::min(y0 + 1, image.rows - 1); float fx = point.x - x0; float fy = point.y - y0; float fx1 = 1.0f - fx; float fy1 = 1.0f - fy; // 双线性插值 const cv::Vec3b& c00 = image.at(y0, x0); const cv::Vec3b& c01 = image.at(y0, x1); const cv::Vec3b& c10 = image.at(y1, x0); const cv::Vec3b& c11 = image.at(y1, x1); cv::Vec3f result = cv::Vec3f(c00[0], c00[1], c00[2]) * (fx1 * fy1) + cv::Vec3f(c01[0], c01[1], c01[2]) * (fx * fy1) + cv::Vec3f(c10[0], c10[1], c10[2]) * (fx1 * fy) + cv::Vec3f(c11[0], c11[1], c11[2]) * (fx * fy); return result; } // 基于视图一致性填充缺失像素 void MeshTexture::FillMissingPixelsWithViewConsistency( Image8U3& texture, cv::Mat3f& colorAccum, cv::Mat1f& weightAccum, cv::Mat1i& viewAccum, Pixel8U colEmpty) { int rows = texture.rows; int cols = texture.cols; // 首先,从有数据的像素直接赋值 #pragma omp parallel for schedule(static) for (int y = 0; y < rows; ++y) { for (int x = 0; x < cols; ++x) { float weight = weightAccum(y, x); if (weight > 0) { cv::Vec3f color = colorAccum(y, x) / weight; texture.at(y, x) = cv::Vec3b( (unsigned char)std::min(255.0f, std::max(0.0f, color[2])), // B (unsigned char)std::min(255.0f, std::max(0.0f, color[1])), // G (unsigned char)std::min(255.0f, std::max(0.0f, color[0])) // R ); } } } // 填充缺失的像素 cv::Mat1b mask(rows, cols, (unsigned char)0); for (int y = 0; y < rows; ++y) { for (int x = 0; x < cols; ++x) { if (weightAccum(y, x) == 0) { mask(y, x) = 255; } } } // 使用视图一致性进行填充 int iterations = 0; int remainingPixels = cv::countNonZero(mask); while (remainingPixels > 0 && iterations < 10) { int filled = 0; #pragma omp parallel for schedule(dynamic) reduction(+:filled) for (int y = 0; y < rows; ++y) { for (int x = 0; x < cols; ++x) { if (mask(y, x) == 0) continue; int dominantView = -1; cv::Vec3f avgColor(0, 0, 0); int count = 0; // 检查3x3邻域 for (int dy = -1; dy <= 1; ++dy) { for (int dx = -1; dx <= 1; ++dx) { int nx = x + dx; int ny = y + dy; if (nx >= 0 && nx < cols && ny >= 0 && ny < 
rows) { if (weightAccum(ny, nx) > 0) { int neighborView = viewAccum(ny, nx); if (dominantView == -1 || neighborView == dominantView) { dominantView = neighborView; avgColor += cv::Vec3f( texture.at(ny, nx)[2], texture.at(ny, nx)[1], texture.at(ny, nx)[0] ); count++; } } } } } if (count > 0) { cv::Vec3f finalColor = avgColor / count; texture.at(y, x) = cv::Vec3b( (unsigned char)finalColor[2], (unsigned char)finalColor[1], (unsigned char)finalColor[0] ); weightAccum(y, x) = 1.0f; viewAccum(y, x) = dominantView; mask(y, x) = 0; filled++; } } } remainingPixels -= filled; iterations++; if (filled == 0) { // 如果无法填充,使用更宽松的条件 break; } } // 填充最后剩余的像素 if (remainingPixels > 0) { cv::inpaint(texture, mask, texture, 3, cv::INPAINT_TELEA); } DEBUG_EXTRA("Filled %d missing pixels in %d iterations", cv::countNonZero(weightAccum == 0) - remainingPixels, iterations); } float MeshTexture::ComputeViewQuality(FIndex idxFace, int viewID) { const Face& face = faces[idxFace]; const Image& imageData = images[viewID]; // 计算投影面积 TexCoord texCoords[3]; for (int i = 0; i < 3; ++i) { texCoords[i] = imageData.camera.ProjectPointP(vertices[face[i]]); } // 计算三角形面积 float area = std::abs( (texCoords[1].x - texCoords[0].x) * (texCoords[2].y - texCoords[0].y) - (texCoords[1].y - texCoords[0].y) * (texCoords[2].x - texCoords[0].x) ) * 0.5f; // 计算视角质量(法线夹角) const Point3f& faceNormal = scene.mesh.faceNormals[idxFace]; // 计算面中心 const cv::Point3f faceCenter = (vertices[face[0]] + vertices[face[1]] + vertices[face[2]]) / 3.0f; // 计算相机中心到面中心的方向 // 注意:imageData.camera.C 是 CMatrix 类型,需要转换为 cv::Point3f const cv::Point3f cameraCenter( static_cast(imageData.camera.C.x), static_cast(imageData.camera.C.y), static_cast(imageData.camera.C.z) ); const cv::Point3f viewDir = cameraCenter - faceCenter; // 归一化 float viewLen = std::sqrt(viewDir.x * viewDir.x + viewDir.y * viewDir.y + viewDir.z * viewDir.z); if (viewLen > 0) { cv::Point3f normalizedViewDir = viewDir * (1.0f / viewLen); // 计算点积 float cosAngle = std::abs( 
faceNormal.x * normalizedViewDir.x + faceNormal.y * normalizedViewDir.y + faceNormal.z * normalizedViewDir.z ); // 计算图像分辨率 float resolution = 1.0f; // 综合质量评分 float quality = area * cosAngle * resolution; return quality; } return 0.0f; } float MeshTexture::ComputeFaceDistance(FIndex fid1, FIndex fid2) { const Point3f center1 = (vertices[faces[fid1][0]] + vertices[faces[fid1][1]] + vertices[faces[fid1][2]]) / 3.0f; const Point3f center2 = (vertices[faces[fid2][0]] + vertices[faces[fid2][1]] + vertices[faces[fid2][2]]) / 3.0f; Point3f diff = center2 - center1; return std::sqrt(diff.x * diff.x + diff.y * diff.y + diff.z * diff.z); } // 判断面是否在视图中可见 // 判断面是否在视图中可见 bool MeshTexture::IsFaceVisibleFromView(FIndex idxFace, int viewID) { if (viewID < 0 || viewID >= (int)images.size()) { return false; } const Face& face = faces[idxFace]; const Image& imageData = images[viewID]; // 检查面的三个顶点是否都在相机视锥体内 for (int i = 0; i < 3; ++i) { const cv::Point3f& vertex = vertices[face[i]]; // 将cv::Point3f转换为OpenMVS的Point3类型 // Camera类使用double精度,但ProjectPointP是模板函数,支持多种类型 cv::Point3d vertex3d(static_cast(vertex.x), static_cast(vertex.y), static_cast(vertex.z)); // 使用正确的ProjectPointP函数 // 从Camera头文件可以看到ProjectPointP返回TPoint2 cv::Point2d proj = imageData.camera.ProjectPointP(vertex3d); // 检查是否在图像范围内(添加边界容差) const float border = 5.0f; if (proj.x < -border || proj.x >= imageData.image.cols + border || proj.y < -border || proj.y >= imageData.image.rows + border) { return false; } // 可选:检查深度(点在相机前方) // 可以使用camera.PointDepth函数 double depth = imageData.camera.PointDepth(vertex3d); if (depth <= 0) { return false; // 点在相机后面 } } // 检查面法线和视图方向的夹角 const cv::Point3f& faceNormal = scene.mesh.faceNormals[idxFace]; // 计算面中心 cv::Point3f faceCenter = (vertices[face[0]] + vertices[face[1]] + vertices[face[2]]) * (1.0f / 3.0f); cv::Point3d faceCenter3d(faceCenter.x, faceCenter.y, faceCenter.z); // 计算相机中心 const cv::Point3d& cameraCenter = imageData.camera.C; // 计算视图方向 cv::Point3d viewDir = cameraCenter - 
faceCenter3d; double viewLen = cv::norm(viewDir); if (viewLen > 0) { // 归一化 viewDir /= viewLen; // 计算法线点积 double cosAngle = faceNormal.x * viewDir.x + faceNormal.y * viewDir.y + faceNormal.z * viewDir.z; // 如果夹角太大(接近90度),面可能不可见 // 使用阈值cos(85°) ≈ 0.087 if (cosAngle < 0.1) { return false; } } return true; } // 优化纹理接缝 void MeshTexture::OptimizeTextureSeams(const std::vector& textures, const std::vector>& seamPoints) { DEBUG_EXTRA("Optimizing texture seams"); if (textures.empty() || seamPoints.empty()) { return; } // 对每个纹理,在其边界处进行羽化 for (size_t i = 0; i < textures.size(); ++i) { cv::Mat texture = textures[i].clone(); const std::vector& seams = seamPoints[i]; if (seams.empty()) { continue; } // 创建接缝掩模 cv::Mat seamMask(texture.size(), CV_8UC1, cv::Scalar(0)); for (const Point2f& point : seams) { int x = static_cast(point.x); int y = static_cast(point.y); if (x >= 0 && x < texture.cols && y >= 0 && y < texture.rows) { // 在接缝点周围创建羽化区域 int radius = 3; // 羽化半径 for (int dy = -radius; dy <= radius; ++dy) { for (int dx = -radius; dx <= radius; ++dx) { int nx = x + dx; int ny = y + dy; if (nx >= 0 && nx < texture.cols && ny >= 0 && ny < texture.rows) { float distance = std::sqrt(dx*dx + dy*dy); float weight = std::max(0.0f, 1.0f - distance / radius); uchar current = seamMask.at(ny, nx); uchar newValue = cv::saturate_cast(current + weight * 255); seamMask.at(ny, nx) = newValue; } } } } } // 对掩模进行高斯模糊,创建平滑的过渡 cv::GaussianBlur(seamMask, seamMask, cv::Size(5, 5), 0); // 应用羽化 for (int y = 0; y < texture.rows; ++y) { for (int x = 0; x < texture.cols; ++x) { uchar maskValue = seamMask.at(y, x); if (maskValue > 0) { float alpha = maskValue / 255.0f; // 与相邻纹理混合 // 这里可以添加与相邻纹理的混合逻辑 // 暂时简单地进行高斯模糊 cv::Vec3b& pixel = texture.at(y, x); // 计算3x3邻域的平均值 cv::Vec3f sum(0, 0, 0); int count = 0; for (int dy = -1; dy <= 1; ++dy) { for (int dx = -1; dx <= 1; ++dx) { int nx = x + dx; int ny = y + dy; if (nx >= 0 && nx < texture.cols && ny >= 0 && ny < texture.rows) { sum += cv::Vec3f(texture.at(ny, 
nx)); ++count; } } } if (count > 0) { cv::Vec3f average = sum / count; cv::Vec3b blended = cv::Vec3b( cv::saturate_cast(pixel[0] * (1 - alpha) + average[0] * alpha), cv::saturate_cast(pixel[1] * (1 - alpha) + average[1] * alpha), cv::saturate_cast(pixel[2] * (1 - alpha) + average[2] * alpha) ); texture.at(y, x) = blended; } } } } // 更新纹理 const_cast(textures[i]) = texture; } DEBUG_EXTRA("Seam optimization completed"); } std::pair MeshTexture::FindSharedEdgeIndices(const Face& face0, const Face& face1) { // 找到两个面共享的边 for (int i = 0; i < 3; ++i) { int j = (i + 1) % 3; VIndex v0 = face0[i]; VIndex v1 = face0[j]; for (int k = 0; k < 3; ++k) { int l = (k + 1) % 3; VIndex w0 = face1[k]; VIndex w1 = face1[l]; // 检查是否是同一条边(顺序可能相同或相反) if ((v0 == w0 && v1 == w1) || (v0 == w1 && v1 == w0)) { return {static_cast(i), static_cast(k)}; } } } return {NO_ID, NO_ID}; } void MeshTexture::LocalSeamLevelingEnhanced() { DEBUG_EXTRA("Starting enhanced local seam leveling (simplified)..."); // 简化版本:只做最基本的处理,避免内存问题 if (seamEdges.empty()) { DEBUG_EXTRA("No seam edges. Skipping local seam leveling."); return; } if (faceTexcoords.GetSize() < faces.GetSize() * 3) { DEBUG_EXTRA("ERROR: faceTexcoords too small. 
Cannot perform local seam leveling."); return; } // 简单的平滑处理:对每个接缝边的纹理坐标进行平均 int processedEdges = 0; for (uint32_t edgeIdx = 0; edgeIdx < seamEdges.GetSize(); ++edgeIdx) { const PairIdx& edge = seamEdges[edgeIdx]; // 边界检查 if (edge.i >= faces.GetSize() || edge.j >= faces.GetSize()) { continue; } uint32_t texIdx0 = edge.i * 3; uint32_t texIdx1 = edge.j * 3; if (texIdx0 + 2 >= faceTexcoords.GetSize() || texIdx1 + 2 >= faceTexcoords.GetSize()) { continue; } // 获取两个面的组件 if (edge.i >= components.GetSize() || edge.j >= components.GetSize()) { continue; } uint32_t comp0 = components[edge.i]; uint32_t comp1 = components[edge.j]; if (comp0 >= mapIdxPatch.GetSize() || comp1 >= mapIdxPatch.GetSize()) { continue; } uint32_t patch0 = mapIdxPatch[comp0]; uint32_t patch1 = mapIdxPatch[comp1]; if (patch0 == patch1) { continue; // 不是接缝 } // 获取面的顶点 const Face& face0 = faces[edge.i]; const Face& face1 = faces[edge.j]; // 找到共享边 std::pair sharedIndices = FindSharedEdgeIndices(face0, face1); if (sharedIndices.first == NO_ID || sharedIndices.second == NO_ID) { continue; } uint32_t idx00 = sharedIndices.first; uint32_t idx01 = (sharedIndices.first + 1) % 3; uint32_t idx10 = sharedIndices.second; uint32_t idx11 = (sharedIndices.second + 1) % 3; // 获取纹理坐标 TexCoord& tc00 = faceTexcoords[texIdx0 + idx00]; TexCoord& tc01 = faceTexcoords[texIdx0 + idx01]; TexCoord& tc10 = faceTexcoords[texIdx1 + idx10]; TexCoord& tc11 = faceTexcoords[texIdx1 + idx11]; // 计算平均值 TexCoord avg0 = (tc00 + tc01) * 0.5f; TexCoord avg1 = (tc10 + tc11) * 0.5f; TexCoord avg = (avg0 + avg1) * 0.5f; // 应用平滑 tc00 = tc00 + (avg - avg0) * 0.1f; tc01 = tc01 + (avg - avg0) * 0.1f; tc10 = tc10 + (avg - avg1) * 0.1f; tc11 = tc11 + (avg - avg1) * 0.1f; processedEdges++; // 每处理1000条边检查一次内存 if (processedEdges % 1000 == 0) { DEBUG_EXTRA(" Processed %d seam edges", processedEdges); } } DEBUG_EXTRA("Enhanced local seam leveling completed. 
Processed %d seam edges.", processedEdges); } void MeshTexture::GlobalSeamLevelingEnhanced() { DEBUG_EXTRA("Starting enhanced global seam leveling with memory safety..."); if (seamVertices.empty()) { DEBUG_EXTRA("No seam vertices found. Skipping global seam leveling."); return; } if (seamEdges.empty()) { DEBUG_EXTRA("No seam edges found. Skipping global seam leveling."); return; } DEBUG_EXTRA("Processing %u seam vertices and %u seam edges", seamVertices.GetSize(), seamEdges.GetSize()); // 1. 构建接缝图 std::vector> seamGraph(seamVertices.GetSize()); std::vector seamVertexPositions(seamVertices.GetSize()); // 收集接缝顶点位置 for (uint32_t i = 0; i < seamVertices.GetSize(); ++i) { const SeamVertex& sv = seamVertices[i]; // 检查顶点索引 if (sv.idxVertex >= scene.mesh.vertices.size()) { DEBUG_EXTRA("WARNING: Seam vertex %u has invalid vertex index %u", i, sv.idxVertex); continue; } // 检查patches是否为空 if (sv.patches.empty()) { DEBUG_EXTRA("WARNING: Seam vertex %u has no patches", i); continue; } // 计算接缝顶点的平均位置 Point3f pos(0, 0, 0); int count = 0; // 遍历patches for (uint32_t patchIndex = 0; patchIndex < sv.patches.GetSize(); ++patchIndex) { const SeamVertex::Patch& patch = sv.patches[patchIndex]; uint32_t patchID = patch.idxPatch; if (patchID >= texturePatches.size()) { DEBUG_EXTRA("WARNING: Invalid patch ID %u in seam vertex %u", patchID, i); continue; } const TexturePatch& texturePatch = texturePatches[patchID]; // 遍历 patch 中的所有面 for (uint32_t faceIdx = 0; faceIdx < texturePatch.faces.GetSize(); ++faceIdx) { FIndex faceID = texturePatch.faces[faceIdx]; if (faceID >= faces.GetSize()) { DEBUG_EXTRA("WARNING: Invalid face index %u in patch %u", faceID, patchID); continue; } const Face& face = faces[faceID]; for (int j = 0; j < 3; ++j) { VIndex vIdx = face[j]; if (vIdx < scene.mesh.vertices.size()) { pos += scene.mesh.vertices[vIdx]; count++; } } } } if (count > 0) { seamVertexPositions[i] = pos / (float)count; } else { // 如果无法从关联的面计算位置,使用顶点自身位置 seamVertexPositions[i] = 
scene.mesh.vertices[sv.idxVertex]; } } // 2. 构建接缝边连接 for (uint32_t i = 0; i < seamEdges.GetSize(); ++i) { const PairIdx& edge = seamEdges[i]; // 验证边有效性 if (edge.i >= faces.GetSize() || edge.j >= faces.GetSize()) { DEBUG_EXTRA("WARNING: Invalid seam edge %u: (%u, %u)", i, edge.i, edge.j); continue; } // 找到边对应的接缝顶点 // 这里简化处理,实际需要根据顶点位置匹配 // 可以添加边连接 } // 3. 构建并求解线性系统 DEBUG_EXTRA("Building linear system for global seam leveling..."); // 这里简化处理,实际实现需要构建线性系统 // 通常涉及求解以下形式的线性系统:A * x = b // 其中x是需要求解的顶点位移 // 4. 应用全局变换 int changedCount = 0; // 这里简化处理,实际需要应用求解得到的变换 DEBUG_EXTRA("Enhanced global seam leveling completed. Changed %d face labels.", changedCount); } // 合并重叠的纹理块 void MeshTexture::MergeOverlappingPatches(Mesh::TexCoordArr& faceTexcoords2) { DEBUG_EXTRA("Merging overlapping texture patches"); if (texturePatches.IsEmpty()) { return; } // 按视图分组纹理块 std::unordered_map> patchesByView; for (int i = 0; i < (int)texturePatches.size(); ++i) { if (texturePatches[i].label != NO_ID) { patchesByView[texturePatches[i].label].push_back(i); } } // 对每个视图的纹理块进行合并 for (auto& viewPatches : patchesByView) { std::vector& patchIndices = viewPatches.second; if (patchIndices.size() <= 1) { continue; // 不需要合并 } // 计算纹理块之间的重叠程度 std::vector>> overlaps; for (size_t i = 0; i < patchIndices.size(); ++i) { TexturePatch& patch1 = texturePatches[patchIndices[i]]; if (patch1.rect.empty()) continue; for (size_t j = i + 1; j < patchIndices.size(); ++j) { TexturePatch& patch2 = texturePatches[patchIndices[j]]; if (patch2.rect.empty()) continue; // 计算两个纹理块的重叠区域 cv::Rect intersection = patch1.rect & patch2.rect; if (!intersection.empty()) { float overlapArea = (float)intersection.area(); float minArea = std::min((float)patch1.rect.area(), (float)patch2.rect.area()); float overlapRatio = overlapArea / minArea; if (overlapRatio > 0.1f) { // 重叠超过10% overlaps.emplace_back(overlapRatio, std::make_pair(patchIndices[i], patchIndices[j])); } } } } // 按重叠程度排序 std::sort(overlaps.begin(), overlaps.end(), [](const 
auto& a, const auto& b) { return a.first > b.first; }); // 合并高度重叠的纹理块 std::vector merged(texturePatches.size(), false); for (const auto& overlap : overlaps) { int idx1 = overlap.second.first; int idx2 = overlap.second.second; if (merged[idx1] || merged[idx2]) { continue; // 已经合并过了 } TexturePatch& patch1 = texturePatches[idx1]; TexturePatch& patch2 = texturePatches[idx2]; // 合并两个纹理块 // 计算合并后的边界框 cv::Rect mergedRect = patch1.rect | patch2.rect; // 合并面列表 - 使用 std::vector 代替 FIndexArr std::vector mergedFaces; // 从 patch1.faces 复制 for (size_t k = 0; k < patch1.faces.GetSize(); ++k) { mergedFaces.push_back(patch1.faces[k]); } // 从 patch2.faces 添加 for (size_t k = 0; k < patch2.faces.GetSize(); ++k) { mergedFaces.push_back(patch2.faces[k]); } // 创建新的纹理块 patch1.rect = mergedRect; // 将 std::vector 转回 patch1.faces patch1.faces.Release(); for (FIndex fid : mergedFaces) { patch1.faces.Insert(fid); } // 标记第二个纹理块为已合并 patch2.label = NO_ID; patch2.faces.Release(); merged[idx2] = true; DEBUG_EXTRA("Merged patches %d and %d (overlap: %.2f%%)", idx1, idx2, overlap.first * 100.0f); } } // 移除空的纹理块 std::vector newIndices(texturePatches.size(), NO_ID); int newIdx = 0; for (int i = 0; i < (int)texturePatches.size(); ++i) { if (texturePatches[i].label != NO_ID && texturePatches[i].faces.GetSize() > 0) { texturePatches[newIdx] = texturePatches[i]; newIndices[i] = newIdx; ++newIdx; } else { newIndices[i] = NO_ID; } } texturePatches.Resize(newIdx); DEBUG_EXTRA("Merged overlapping patches, new count: %d", texturePatches.size()); } // 打包纹理块 void MeshTexture::PackTexturePatches(const Mesh::TexCoordArr& faceTexcoords2, const Mesh::TexIndexArr& faceTexindices2, std::vector& generatedTextures, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, int maxTextureSize) { DEBUG_EXTRA("Packing texture patches with enhanced algorithm"); if (texturePatches.IsEmpty()) { DEBUG_EXTRA("No texture patches to pack"); return; } // 收集所有需要打包的纹理块 std::vector validPatches; for (int i = 0; i < 
(int)texturePatches.size(); ++i) { if (texturePatches[i].label != NO_ID && !texturePatches[i].rect.empty()) { validPatches.push_back(&texturePatches[i]); } } if (validPatches.empty()) { DEBUG_EXTRA("No valid texture patches to pack"); return; } // 按视图分组纹理块 std::unordered_map> patchesByView; for (TexturePatch* patch : validPatches) { patchesByView[patch->label].push_back(patch); } // 为每个视图创建一个纹理图集 generatedTextures.clear(); std::vector> textureAtlases; for (auto& viewPatches : patchesByView) { std::vector& patches = viewPatches.second; // 对纹理块按大小排序(从大到小) std::sort(patches.begin(), patches.end(), [](const TexturePatch* a, const TexturePatch* b) { return a->rect.area() > b->rect.area(); }); // 使用矩形打包算法 std::vector rectangles; std::vector rectPatches; for (TexturePatch* patch : patches) { rectangles.push_back(patch->rect); rectPatches.push_back(patch); } // 计算所需纹理大小 int totalArea = 0; for (const cv::Rect& rect : rectangles) { totalArea += rect.area(); } // 估计纹理大小(考虑填充) int estimatedSize = (int)std::ceil(std::sqrt(totalArea * 1.5f)); // 对齐到倍数 if (nTextureSizeMultiple > 1) { estimatedSize = ((estimatedSize + nTextureSizeMultiple - 1) / nTextureSizeMultiple) * nTextureSizeMultiple; } // 限制最大大小 if (maxTextureSize > 0 && estimatedSize > maxTextureSize) { estimatedSize = maxTextureSize; } // 创建纹理图集 cv::Mat texture(estimatedSize, estimatedSize, CV_8UC3, cv::Scalar(0, 0, 0)); // 简单的打包策略:从左到右,从上到下 int x = 0, y = 0; int maxRowHeight = 0; for (size_t i = 0; i < rectangles.size(); ++i) { cv::Rect& rect = rectangles[i]; TexturePatch* patch = rectPatches[i]; // 检查是否适合当前行 if (x + rect.width > estimatedSize) { // 换行 x = 0; y += maxRowHeight; maxRowHeight = 0; } // 检查是否适合纹理 if (y + rect.height > estimatedSize) { // 纹理太小,需要调整 DEBUG_EXTRA("Texture atlas too small, increasing size"); // 这里可以增加纹理大小或使用更复杂的打包算法 break; } // 放置纹理块 patch->rect.x = x; patch->rect.y = y; x += rect.width; if (rect.height > maxRowHeight) { maxRowHeight = rect.height; } } generatedTextures.push_back(texture); 
textureAtlases.push_back(patches); } DEBUG_EXTRA("Packed %zu texture atlases", generatedTextures.size()); } // 自适应锐化 void MeshTexture::ApplyAdaptiveSharpening(std::vector& textures, float fSharpnessWeight) { if (fSharpnessWeight <= 0.0f || textures.empty()) { return; } DEBUG_EXTRA("Applying adaptive sharpening to textures (weight: %.2f)", fSharpnessWeight); for (size_t i = 0; i < textures.size(); ++i) { cv::Mat& texture = textures[i]; if (texture.empty()) continue; // 计算图像的梯度幅度,用于自适应锐化 cv::Mat gray, gradient; cv::cvtColor(texture, gray, cv::COLOR_BGR2GRAY); // 计算Sobel梯度 cv::Mat gradX, gradY; cv::Sobel(gray, gradX, CV_32F, 1, 0, 3); cv::Sobel(gray, gradY, CV_32F, 0, 1, 3); cv::magnitude(gradX, gradY, gradient); // 归一化梯度 double minVal, maxVal; cv::minMaxLoc(gradient, &minVal, &maxVal); if (maxVal > minVal) { gradient = (gradient - minVal) / (maxVal - minVal); } // 对图像进行锐化 cv::Mat sharpened; // 使用非锐化掩模 cv::Mat blurred; cv::GaussianBlur(texture, blurred, cv::Size(0, 0), 3.0); // 自适应锐化:在边缘区域使用更强的锐化 for (int y = 0; y < texture.rows; ++y) { for (int x = 0; x < texture.cols; ++x) { float edgeStrength = gradient.at(y, x); float weight = fSharpnessWeight * (0.5f + edgeStrength * 0.5f); cv::Vec3b& srcPixel = texture.at(y, x); cv::Vec3b& blurPixel = blurred.at(y, x); cv::Vec3b& dstPixel = sharpened.at(y, x); for (int c = 0; c < 3; ++c) { int value = static_cast(srcPixel[c] + weight * (srcPixel[c] - blurPixel[c])); dstPixel[c] = cv::saturate_cast(value); } } } texture = sharpened; } DEBUG_EXTRA("Adaptive sharpening completed"); } // 填充纹理空洞 void MeshTexture::FillTextureHoles(std::vector& textures, Pixel8U colEmpty) { if (textures.empty()) { return; } DEBUG_EXTRA("Filling holes in textures"); cv::Scalar emptyColor(colEmpty.r, colEmpty.g, colEmpty.b); for (size_t i = 0; i < textures.size(); ++i) { cv::Mat& texture = textures[i]; if (texture.empty()) continue; // 创建掩模,标识空洞区域 cv::Mat mask(texture.size(), CV_8UC1, cv::Scalar(0)); for (int y = 0; y < texture.rows; ++y) { for (int x = 
0; x < texture.cols; ++x) { cv::Vec3b pixel = texture.at(y, x); if (pixel[0] == emptyColor[0] && pixel[1] == emptyColor[1] && pixel[2] == emptyColor[2]) { mask.at(y, x) = 255; } } } int holeCount = cv::countNonZero(mask); if (holeCount == 0) { continue; // 没有空洞 } // DEBUG_VERBOSE("Texture %zu: filling %d holes", i, holeCount); // 使用修复算法填充空洞 cv::Mat inpainted; cv::inpaint(texture, mask, inpainted, 3, cv::INPAINT_TELEA); // 将修复的区域复制回原图像 for (int y = 0; y < texture.rows; ++y) { for (int x = 0; x < texture.cols; ++x) { if (mask.at(y, x) == 255) { texture.at(y, x) = inpainted.at(y, x); } } } } DEBUG_EXTRA("Hole filling completed"); } bool MeshTexture::TextureWithExistingUV( const IIndexArr& views, int nIgnoreMaskLabel, float fOutlierThreshold, unsigned nTextureSizeMultiple, Pixel8U colEmpty, float fSharpnessWeight, const Mesh::Image8U3Arr& existingTextures, const Mesh::TexCoordArr& existingTexcoords, const Mesh::TexIndexArr& existingTexindices) { DEBUG_EXTRA("TextureWithExistingUV - 使用3D几何坐标作为桥梁"); TD_TIMER_START(); // 1. 验证输入 if (scene.mesh.faceTexcoords.empty()) { VERBOSE("error: mesh does not contain UV coordinates"); return false; } if (existingTextures.empty()) { VERBOSE("error: no existing texture data provided"); return false; } DEBUG_EXTRA("Processing %zu faces with existing texture data", scene.mesh.faces.size()); // 2. 为每个面选择最佳视图(从原始图像,而不是已有纹理) FaceDataViewArr facesDatas; if (!ListCameraFaces(facesDatas, fOutlierThreshold, nIgnoreMaskLabel, views, false)) { return false; } // 3. 为每个面分配最佳视图 LabelArr faceLabels(scene.mesh.faces.size()); FOREACH(idxFace, scene.mesh.faces) { const FaceDataArr& faceDatas = facesDatas[idxFace]; if (faceDatas.empty()) { faceLabels[idxFace] = 0; // 无视图可用 continue; } // 选择质量最高的视图 float bestQuality = -1; IIndex bestView = NO_ID; for (const FaceData& data : faceDatas) { if (data.quality > bestQuality && !data.bInvalidFacesRelative) { bestQuality = data.quality; bestView = data.idxView; } } faceLabels[idxFace] = (bestView != NO_ID) ? 
(bestView + 1) : 0; } // 4. 生成纹理图集 Mesh::Image8U3Arr generatedTextures = GenerateTextureAtlasWith3DBridge( faceLabels, views, existingTextures, existingTexcoords, existingTexindices, nTextureSizeMultiple, colEmpty, fSharpnessWeight ); if (!generatedTextures.empty()) { // 检查颜色通道 CheckColorChannels(generatedTextures[0], "生成的纹理"); // 检查源纹理颜色通道 if (!existingTextures.empty()) { CheckColorChannels(existingTextures[0], "源纹理"); } // 保存纹理 scene.mesh.texturesDiffuse = std::move(generatedTextures); // 设置面的纹理索引 if (scene.mesh.texturesDiffuse.size() > 1) { scene.mesh.faceTexindices.resize(scene.mesh.faces.size()); for (size_t i = 0; i < scene.mesh.faces.size(); ++i) { scene.mesh.faceTexindices[i] = 0; } } else { scene.mesh.faceTexindices.Release(); } DEBUG_EXTRA("Generated %zu textures from existing data", scene.mesh.texturesDiffuse.size()); return true; } DEBUG_EXTRA("Texture generation failed"); return false; } Mesh::Image8U3Arr MeshTexture::GenerateTextureAtlasWith3DBridge( const LabelArr& faceLabels, const IIndexArr& views, const Mesh::Image8U3Arr& sourceTextures, const Mesh::TexCoordArr& sourceTexcoords, const Mesh::TexIndexArr& sourceTexindices, unsigned nTextureSizeMultiple, Pixel8U colEmpty, float fSharpnessWeight) { DEBUG_EXTRA("GenerateTextureAtlasWith3DBridge - 使用3D几何坐标作为桥梁"); // 1. 
分析外部UV布局 AABB2f uvBounds(true); FOREACH(i, scene.mesh.faceTexcoords) { const TexCoord& uv = scene.mesh.faceTexcoords[i]; uvBounds.InsertFull(uv); } // 确保UV在[0,1]范围内 if (uvBounds.ptMin.x() < 0 || uvBounds.ptMin.y() < 0 || uvBounds.ptMax.x() > 1 || uvBounds.ptMax.y() > 1) { DEBUG_EXTRA("UV超出[0,1]范围,进行归一化"); uvBounds = AABB2f(true); for (size_t i = 0; i < scene.mesh.faceTexcoords.size(); i += 3) { for (int v = 0; v < 3; ++v) { const TexCoord& uv = scene.mesh.faceTexcoords[i + v]; uvBounds.InsertFull(uv); } } } // 计算纹理尺寸 const float uvWidth = uvBounds.ptMax.x() - uvBounds.ptMin.x(); const float uvHeight = uvBounds.ptMax.y() - uvBounds.ptMin.y(); const int textureSize = ComputeOptimalTextureSize(uvWidth, uvHeight, nTextureSizeMultiple); // 创建目标纹理 Mesh::Image8U3Arr textures; Image8U3& textureAtlas = textures.emplace_back(textureSize, textureSize); // 注意:cv::Scalar 使用 BGR 顺序 textureAtlas.setTo(cv::Scalar(colEmpty.b, colEmpty.g, colEmpty.r)); DEBUG_EXTRA("生成纹理图集: 尺寸=%dx%d, UV范围=[%.3f,%.3f]-[%.3f,%.3f]", textureSize, textureSize, uvBounds.ptMin.x(), uvBounds.ptMin.y(), uvBounds.ptMax.x(), uvBounds.ptMax.y()); // 添加调试信息 DEBUG_EXTRA("colEmpty: R=%d, G=%d, B=%d", colEmpty.r, colEmpty.g, colEmpty.b); DEBUG_EXTRA("OpenCV图像通道顺序: BGR"); // 检查颜色通道顺序 if (!sourceTextures.empty()) { const Image8U3& firstTex = sourceTextures[0]; cv::Vec3b firstPixel = firstTex.at(0, 0); DEBUG_EXTRA("源纹理第一个像素: B=%d, G=%d, R=%d", firstPixel[0], firstPixel[1], firstPixel[2]); } // 3. 统计信息 int processedFaces = 0; int sampledPixels = 0; int failedFaces = 0; // 4. 
为每个面采样 #ifdef _USE_OPENMP #pragma omp parallel for schedule(dynamic) reduction(+:processedFaces, sampledPixels, failedFaces) #endif for (int_t idxFace = 0; idxFace < (int_t)scene.mesh.faces.size(); ++idxFace) { const FIndex faceID = (FIndex)idxFace; const Label label = faceLabels[faceID]; if (label == 0) { failedFaces++; continue; } const IIndex idxView = label - 1; if (idxView >= images.size()) { failedFaces++; continue; } // 获取面的几何信息 const TexCoord* meshUVs = &scene.mesh.faceTexcoords[faceID * 3]; const Face& face = scene.mesh.faces[faceID]; // 获取源纹理信息 const TexCoord* srcUVs = &sourceTexcoords[faceID * 3]; const TexIndex textureIdx = sourceTexindices.empty() ? 0 : sourceTexindices[faceID]; if (textureIdx >= sourceTextures.size()) { failedFaces++; continue; } const Image8U3& sourceTexture = sourceTextures[textureIdx]; // 计算面的UV边界 AABB2f faceBounds(true); for (int i = 0; i < 3; ++i) { faceBounds.InsertFull(meshUVs[i]); } // 转换为像素坐标 int startX = (int)(faceBounds.ptMin.x() * textureSize); int startY = (int)(faceBounds.ptMin.y() * textureSize); int endX = (int)(faceBounds.ptMax.x() * textureSize); int endY = (int)(faceBounds.ptMax.y() * textureSize); // 边界检查 startX = std::max(0, std::min(startX, textureSize - 1)); startY = std::max(0, std::min(startY, textureSize - 1)); endX = std::max(0, std::min(endX, textureSize - 1)); endY = std::max(0, std::min(endY, textureSize - 1)); if (startX >= endX || startY >= endY) { failedFaces++; continue; } int faceSampledPixels = 0; // 采样纹理 for (int y = startY; y <= endY; ++y) { for (int x = startX; x <= endX; ++x) { const Point2f texCoord((x + 0.5f) / textureSize, (y + 0.5f) / textureSize); // 计算重心坐标 Point3f barycentric; if (!PointInTriangle(texCoord, meshUVs[0], meshUVs[1], meshUVs[2], barycentric)) { continue; } // 计算3D点 const Vertex worldPoint = vertices[face[0]] * barycentric.x + vertices[face[1]] * barycentric.y + vertices[face[2]] * barycentric.z; // 方案A:从原始图像采样 Point2f imgPoint = 
ProjectPointWithAutoCorrection(images[idxView].camera, worldPoint, images[idxView]); if (imgPoint.x < 0 || imgPoint.x >= images[idxView].image.cols || imgPoint.y < 0 || imgPoint.y >= images[idxView].image.rows) { continue; } // 修正:使用双线性插值采样 const int x0 = (int)floor(imgPoint.x); const int y0 = (int)floor(imgPoint.y); const int x1 = std::min(x0 + 1, images[idxView].image.cols - 1); const int y1 = std::min(y0 + 1, images[idxView].image.rows - 1); const float fx = imgPoint.x - x0; const float fy = imgPoint.y - y0; const float fx1 = 1.0f - fx; const float fy1 = 1.0f - fy; // 采样四个点的颜色 const cv::Vec3b& c00 = images[idxView].image.at(y0, x0); const cv::Vec3b& c01 = images[idxView].image.at(y0, x1); const cv::Vec3b& c10 = images[idxView].image.at(y1, x0); const cv::Vec3b& c11 = images[idxView].image.at(y1, x1); // 双线性插值 const cv::Vec3b color = c00 * (fx1 * fy1) + c01 * (fx * fy1) + c10 * (fx1 * fy) + c11 * (fx * fy); // 注意:OpenCV是BGR顺序,而Pixel8U通常是RGB顺序 // 这里我们需要确保颜色通道正确 #ifdef _USE_OPENMP #pragma omp critical #endif { // 方案1:直接使用BGR顺序(OpenCV默认) // textureAtlas.at(y, x) = color; // 方案2:交换B和R通道(如果颜色偏蓝,说明B和R反了) // 将BGR转换为RGB textureAtlas.at(y, x) = cv::Vec3b(color[2], color[1], color[0]); } faceSampledPixels++; } } if (faceSampledPixels > 0) { processedFaces++; sampledPixels += faceSampledPixels; } else { failedFaces++; } } DEBUG_EXTRA("纹理采样完成: 成功 %d 个面, 失败 %d 个面, 采样 %d 像素", processedFaces, failedFaces, sampledPixels); // 5. 填充空洞 if (processedFaces > 0 && sampledPixels > 0) { // FillTextureGaps(textureAtlas, scene.mesh.faceTexcoords, (FIndex)scene.mesh.faces.size(), textureSize, colEmpty); } // 6. 
锐化处理 if (fSharpnessWeight > 0) { // ApplySharpening(textureAtlas, fSharpnessWeight); } return textures; }

/**
 * Project a 3D point into an image; if the direct projection falls outside the
 * image bounds, probe a small fixed set of image-space offsets and return the
 * first probed point that lands inside the image.
 * @param camera      projecting camera
 * @param worldPoint  3D point to project
 * @param sourceImage image whose bounds are tested
 * @return the (possibly offset-corrected) image point; the uncorrected
 *         projection is returned when no probe succeeds — callers must still
 *         bounds-check it
 */
Point2f MeshTexture::ProjectPointWithAutoCorrection(const Camera& camera, const Vertex& worldPoint, const Image& sourceImage) {
	Point2f imgPoint = camera.ProjectPointP(worldPoint);
	// fast path: the direct projection is already valid
	if (!sourceImage.image.isInside(imgPoint)) {
		// probing only makes sense for points in front of the camera;
		// (fix) this test does not depend on the offset, so hoist it out of the loop
		if (camera.IsInFront(worldPoint)) {
			const Point2f testOffsets[] = {
				Point2f(0, sourceImage.image.rows * 0.015f),   // currently used offset
				Point2f(0, -sourceImage.image.rows * 0.015f),  // opposite direction
				Point2f(sourceImage.image.cols * 0.015f, 0),   // horizontal offset
				Point2f(0, 0)                                  // no offset
			};
			for (const Point2f& offset : testOffsets) {
				const Point2f testPoint = imgPoint + offset;
				if (sourceImage.image.isInside(testPoint))
					return testPoint;
			}
		}
	}
	return imgPoint;
}

/**
 * Project a 3D point into an image; if the direct projection is invalid,
 * linearly search around it (vertical direction first, then horizontal)
 * for the closest point inside the image.
 * @param searchRadius search extent as a fraction of the image height
 * @return the first valid probed point, or the raw projection if none was
 *         found (callers are expected to skip it)
 */
Point2f MeshTexture::ProjectPointRobust(const Camera& camera, const Vertex& worldPoint, const Image& sourceImage, float searchRadius) {
	Point2f imgPoint = camera.ProjectPointP(worldPoint);
	// direct projection already valid: nothing to do
	if (sourceImage.image.isInside(imgPoint) && camera.IsInFront(worldPoint))
		return imgPoint;
	const int searchSteps = 5;
	const float stepSize = searchRadius * sourceImage.image.rows / searchSteps;
	// (fix) the in-front test does not depend on the probe offset; hoist it
	if (camera.IsInFront(worldPoint)) {
		// search the y direction first (dominant error direction)
		for (int dy = -searchSteps; dy <= searchSteps; ++dy) {
			const Point2f testPoint = imgPoint + Point2f(0, dy * stepSize);
			if (sourceImage.image.isInside(testPoint))
				return testPoint;
		}
		// fall back to the x direction
		for (int dx = -searchSteps; dx <= searchSteps; ++dx) {
			const Point2f testPoint = imgPoint + Point2f(dx * stepSize, 0);
			if (sourceImage.image.isInside(testPoint))
				return testPoint;
		}
	}
	// nothing found: return the raw projection (skipped downstream)
	return imgPoint;
}

// helper: closest point to p on segment [a,b]
Point2f ProjectPointToLineSegment(const Point2f& p, const Point2f& a, const Point2f& b) {
	const Point2f ab = b - a;
	const float len2 = ab.dot(ab);
	// (fix) degenerate segment (a == b): the division below would be 0/0;
	// the closest point is the single endpoint
	if (len2 <= 0.f)
		return a;
	float t = (p - a).dot(ab) / len2;
	t = std::max(0.0f, std::min(1.0f, t));
	return a + ab * t;
}

// helper: compute barycentric coordinates of a 2D point
Point3f
BarycentricFromPoint(const Point2f& p, const Point2f& a, const Point2f& b, const Point2f& c) { Point2f v0 = b - a; Point2f v1 = c - a; Point2f v2 = p - a; float d00 = v0.dot(v0); float d01 = v0.dot(v1); float d11 = v1.dot(v1); float d20 = v2.dot(v0); float d21 = v2.dot(v1); float denom = d00 * d11 - d01 * d01; float v = (d11 * d20 - d01 * d21) / denom; float w = (d00 * d21 - d01 * d20) / denom; float u = 1.0f - v - w; return Point3f(u, v, w); } // 辅助函数:判断点是否在三角形内 bool PointInTriangle(const Point2f& p, const Point2f& a, const Point2f& b, const Point2f& c, Point3f& barycentric) { barycentric = BarycentricFromPoint(p, a, b, c); return (barycentric.x >= 0 && barycentric.x <= 1 && barycentric.y >= 0 && barycentric.y <= 1 && barycentric.z >= 0 && barycentric.z <= 1); } // 辅助函数:从图像中双线性插值采样颜色 // 修正颜色顺序和边界处理 Pixel8U SampleImageBilinear(const Image8U3& image, const Point2f& point) { // 边界检查,防止越界 float x = CLAMP(point.x, 0.0f, (float)(image.cols - 1)); float y = CLAMP(point.y, 0.0f, (float)(image.rows - 1)); int x0 = (int)floor(x); int y0 = (int)floor(y); int x1 = std::min(x0 + 1, image.cols - 1); int y1 = std::min(y0 + 1, image.rows - 1); // 确保x0,y0不会超出下界 x0 = std::max(0, x0); y0 = std::max(0, y0); float dx = x - x0; float dy = y - y0; float dx1 = 1.0f - dx; float dy1 = 1.0f - dy; // 获取四个角点的像素 const Pixel8U& p00 = image(y0, x0); const Pixel8U& p01 = image(y0, x1); const Pixel8U& p10 = image(y1, x0); const Pixel8U& p11 = image(y1, x1); // 方法1:使用结构体成员访问(推荐) float b = p00.b * dx1 * dy1 + p01.b * dx * dy1 + p10.b * dx1 * dy + p11.b * dx * dy; float g = p00.g * dx1 * dy1 + p01.g * dx * dy1 + p10.g * dx1 * dy + p11.g * dx * dy; float r = p00.r * dx1 * dy1 + p01.r * dx * dy1 + p10.r * dx1 * dy + p11.r * dx * dy; /* // 方法2:如果Pixel8U支持[]操作符 // 注意:OpenMVS的Pixel8U可能是BGR或RGB顺序,需要根据实际情况调整 float b = p00[0] * dx1 * dy1 + p01[0] * dx * dy1 + p10[0] * dx1 * dy + p11[0] * dx * dy; float g = p00[1] * dx1 * dy1 + p01[1] * dx * dy1 + p10[1] * dx1 * dy + p11[1] * dx * dy; float r = p00[2] * dx1 * 
dy1 + p01[2] * dx * dy1 + p10[2] * dx1 * dy + p11[2] * dx * dy; */ return Pixel8U( (unsigned char)CLAMP(b, 0.0f, 255.0f), (unsigned char)CLAMP(g, 0.0f, 255.0f), (unsigned char)CLAMP(r, 0.0f, 255.0f) ); } void FillTextureGaps2(Image8U3& textureAtlas, const Mesh::TexCoordArr& faceTexcoords, FIndex nFaces, const MeshTexture::LabelArr& faceLabels, const IIndexArr& views, int textureSize, Pixel8U colEmpty) { DEBUG_EXTRA("开始填充纹理间隙..."); // 创建距离场,记录每个像素到最近三角形的距离 cv::Mat distanceField(textureSize, textureSize, CV_32FC1, cv::Scalar(std::numeric_limits::max())); cv::Mat nearestFaceIdx(textureSize, textureSize, CV_32SC1, cv::Scalar(-1)); // 1. 计算距离场 for (FIndex idxFace = 0; idxFace < nFaces; ++idxFace) { if (faceLabels[idxFace] == 0) continue; const TexCoord* uvCoords = &faceTexcoords[idxFace * 3]; // 计算三角形的包围盒 int minX = textureSize, maxX = 0; int minY = textureSize, maxY = 0; for (int i = 0; i < 3; ++i) { int px = std::max(0, std::min(textureSize - 1, (int)(uvCoords[i].x * textureSize))); int py = std::max(0, std::min(textureSize - 1, (int)(uvCoords[i].y * textureSize))); minX = std::min(minX, px); maxX = std::max(maxX, px); minY = std::min(minY, py); maxY = std::max(maxY, py); } // 扩展包围盒以填充间隙 const int margin = 3; // 扩展3个像素 minX = std::max(0, minX - margin); maxX = std::min(textureSize - 1, maxX + margin); minY = std::max(0, minY - margin); maxY = std::min(textureSize - 1, maxY + margin); for (int y = minY; y <= maxY; ++y) { for (int x = minX; x <= maxX; ++x) { Point2f texCoord((float)x / textureSize, (float)y / textureSize); // 计算到三角形的最短距离 float minDist = std::numeric_limits::max(); // 计算到三角形边的距离 for (int i = 0; i < 3; ++i) { const Point2f& p1 = uvCoords[i]; const Point2f& p2 = uvCoords[(i + 1) % 3]; Point2f proj = ProjectPointToLineSegment(texCoord, p1, p2); float dist = cv::norm(texCoord - proj); minDist = std::min(minDist, dist); } // 如果距离更近,更新距离场 float& currentDist = distanceField.at(y, x); if (minDist < currentDist) { currentDist = minDist; nearestFaceIdx.at(y, x) = 
idxFace; } } } } // 2. 填充空白区域 const float maxFillDistance = 5.0f / textureSize; // 最大填充距离 for (int iteration = 0; iteration < 2; ++iteration) { int filledCount = 0; for (int y = 0; y < textureSize; ++y) { for (int x = 0; x < textureSize; ++x) { // 如果当前像素是空白 Pixel8U currentPixel = textureAtlas(y, x); if (currentPixel[0] == colEmpty[0] && currentPixel[1] == colEmpty[1] && currentPixel[2] == colEmpty[2]) { int nearestFace = nearestFaceIdx.at(y, x); float dist = distanceField.at(y, x); if (nearestFace >= 0 && dist <= maxFillDistance) { // 找到最近的已填充像素 bool foundColor = false; float sumB = 0.0f, sumG = 0.0f, sumR = 0.0f; int count = 0; // 检查3x3邻域 for (int dy = -1; dy <= 1; ++dy) { for (int dx = -1; dx <= 1; ++dx) { int ny = y + dy; int nx = x + dx; if (ny >= 0 && ny < textureSize && nx >= 0 && nx < textureSize) { Pixel8U neighborPixel = textureAtlas(ny, nx); if (neighborPixel[0] != colEmpty[0] || neighborPixel[1] != colEmpty[1] || neighborPixel[2] != colEmpty[2]) { // 注意:OpenCV的Mat存储顺序是BGR sumB += static_cast(neighborPixel[0]); sumG += static_cast(neighborPixel[1]); sumR += static_cast(neighborPixel[2]); count++; foundColor = true; } } } } if (foundColor && count > 0) { // 手动计算平均值 Pixel8U avgColor; avgColor[0] = static_cast(sumB / count + 0.5f); // B avgColor[1] = static_cast(sumG / count + 0.5f); // G avgColor[2] = static_cast(sumR / count + 0.5f); // R textureAtlas(y, x) = avgColor; filledCount++; } } } } } DEBUG_EXTRA("填充迭代 %d: 填充了 %d 个像素", iteration + 1, filledCount); if (filledCount == 0) break; } // 3. 
使用图像修复算法填充剩余空白 cv::Mat mask = cv::Mat::zeros(textureSize, textureSize, CV_8UC1); for (int y = 0; y < textureSize; ++y) { for (int x = 0; x < textureSize; ++x) { Pixel8U pixel = textureAtlas(y, x); if (pixel[0] == colEmpty[0] && pixel[1] == colEmpty[1] && pixel[2] == colEmpty[2]) { mask.at(y, x) = 255; } } } if (cv::countNonZero(mask) > 0) { cv::Mat textureMat = textureAtlas; // OpenCV Mat会自动引用计数 cv::Mat inpaintResult; cv::inpaint(textureMat, mask, inpaintResult, 3, cv::INPAINT_TELEA); inpaintResult.copyTo(textureAtlas); } DEBUG_EXTRA("纹理间隙填充完成"); }// 修改TextureGrid结构体,在构造函数中传入纹理大小 struct TextureGrid { std::vector> grid; float cellSize; int gridSize; int textureSize; // 添加纹理大小成员变量 TextureGrid(int _textureSize, int cellCount) : textureSize(_textureSize) { cellSize = (float)textureSize / cellCount; gridSize = cellCount; grid.resize(gridSize * gridSize); } void AddTriangle(FIndex faceIdx, const TexCoord* uvCoords, int _textureSize) { // 添加纹理大小参数 int minX = _textureSize, maxX = 0; int minY = _textureSize, maxY = 0; for (int i = 0; i < 3; ++i) { int px = std::max(0, std::min(_textureSize - 1, (int)(uvCoords[i].x * _textureSize))); int py = std::max(0, std::min(_textureSize - 1, (int)(uvCoords[i].y * _textureSize))); minX = std::min(minX, px); maxX = std::max(maxX, px); minY = std::min(minY, py); maxY = std::max(maxY, py); } int cellMinX = std::max(0, (int)(minX / cellSize)); int cellMaxX = std::min(gridSize-1, (int)(maxX / cellSize)); int cellMinY = std::max(0, (int)(minY / cellSize)); int cellMaxY = std::min(gridSize-1, (int)(maxY / cellSize)); for (int cy = cellMinY; cy <= cellMaxY; ++cy) { for (int cx = cellMinX; cx <= cellMaxX; ++cx) { grid[cy * gridSize + cx].push_back(faceIdx); } } } const std::vector& GetTrianglesInCell(int x, int y) const { return grid[y * gridSize + x]; } }; void GenerateDistanceFieldFast(cv::Mat& distanceField, cv::Mat& nearestFaceIdx, const std::vector& seedPoints, int textureSize) { // 使用跳点传播算法 distanceField = cv::Mat::zeros(textureSize, 
textureSize, CV_32FC1); nearestFaceIdx = cv::Mat::zeros(textureSize, textureSize, CV_32SC1); // 初始化距离场 distanceField.setTo(std::numeric_limits::max()); // 标记种子点 for (const auto& pt : seedPoints) { distanceField.at(pt) = 0; nearestFaceIdx.at(pt) = pt.y * textureSize + pt.x; // 存储自身索引 } // 跳点传播 for (int step = textureSize / 2; step >= 1; step /= 2) { #pragma omp parallel for collapse(2) for (int y = 0; y < textureSize; ++y) { for (int x = 0; x < textureSize; ++x) { float minDist = distanceField.at(y, x); int bestIdx = nearestFaceIdx.at(y, x); for (int dy = -1; dy <= 1; dy += 2) { for (int dx = -1; dx <= 1; dx += 2) { int nx = x + dx * step; int ny = y + dy * step; if (nx >= 0 && nx < textureSize && ny >= 0 && ny < textureSize) { float dist = distanceField.at(ny, nx) + sqrtf((dx*dx + dy*dy) * step * step); if (dist < minDist) { minDist = dist; bestIdx = nearestFaceIdx.at(ny, nx); } } } } distanceField.at(y, x) = minDist; nearestFaceIdx.at(y, x) = bestIdx; } } } } void FillTextureGaps( Image8U3& texture, const Mesh::TexCoordArr& uvs, FIndex faceCount, int textureSize, Pixel8U colEmpty ) { DEBUG_EXTRA("Filling texture gaps for %dx%d texture", texture.cols, texture.rows); // 创建掩码图像 cv::Mat mask = cv::Mat::zeros(texture.rows, texture.cols, CV_8UC1); // 标记有纹理的像素 for (FIndex fid = 0; fid < faceCount; ++fid) { const TexCoord* uv = &uvs[fid * 3]; // 计算面的UV边界 AABB2f faceBounds(true); for (int i = 0; i < 3; ++i) { Point2f pixelPos(uv[i].x * textureSize, uv[i].y * textureSize); faceBounds.InsertFull(pixelPos); } // 填充三角形区域 const int startX = std::max(0, (int)faceBounds.ptMin.x()); const int startY = std::max(0, (int)faceBounds.ptMin.y()); const int endX = std::min(texture.cols - 1, (int)faceBounds.ptMax.x()); const int endY = std::min(texture.rows - 1, (int)faceBounds.ptMax.y()); for (int y = startY; y <= endY; ++y) { for (int x = startX; x <= endX; ++x) { Point2f texPos((float)x / textureSize, (float)y / textureSize); // 检查是否在UV三角形内 Point3f barycentric; if 
(PointInTriangle(texPos, uv[0], uv[1], uv[2], barycentric)) { mask.at(y, x) = 255; } } } } // 使用膨胀操作扩展掩码 cv::Mat dilatedMask; cv::dilate(mask, dilatedMask, cv::Mat(), cv::Point(-1, -1), 2); // 对空洞区域进行填充 cv::inpaint(texture, mask, texture, 3, cv::INPAINT_TELEA); DEBUG_EXTRA("Texture gap filling completed"); } void ApplySharpening(Image8U3& texture, float weight) { if (weight <= 0) return; Image8U3 blurred; cv::GaussianBlur(texture, blurred, cv::Size(0, 0), 1.5); cv::addWeighted(texture, 1.0 + weight, blurred, -weight, 0, texture); } Mesh::Image8U3Arr MeshTexture::GenerateTextureAtlasFromUV( const Mesh::Image8U3Arr& sourceTextures, // 已有纹理数组 const Mesh::TexCoordArr& sourceTexcoords, // 已有UV坐标 const Mesh::TexIndexArr& sourceTexindices, // 已有纹理索引 unsigned nTextureSizeMultiple, Pixel8U colEmpty, float fSharpnessWeight ) { DEBUG_EXTRA("Generating texture atlas from existing UV data with %zu source textures", sourceTextures.size()); // 1. 分析模型原始UV布局 AABB2f uvBounds(true); FOREACH(i, scene.mesh.faceTexcoords) { const TexCoord& uv = scene.mesh.faceTexcoords[i]; uvBounds.InsertFull(uv); } // 确保UV在[0,1]范围内 if (uvBounds.ptMin.x() < 0 || uvBounds.ptMin.y() < 0 || uvBounds.ptMax.x() > 1 || uvBounds.ptMax.y() > 1) { // UV超出范围,进行归一化 DEBUG_EXTRA("UV coordinates out of [0,1] range, normalizing..."); uvBounds = AABB2f(true); for (size_t i = 0; i < scene.mesh.faceTexcoords.size(); i += 3) { for (int v = 0; v < 3; ++v) { const TexCoord& uv = scene.mesh.faceTexcoords[i + v]; uvBounds.InsertFull(uv); } } } // 计算纹理尺寸 const float uvWidth = uvBounds.ptMax.x() - uvBounds.ptMin.x(); const float uvHeight = uvBounds.ptMax.y() - uvBounds.ptMin.y(); const int textureSize = ComputeOptimalTextureSize(uvWidth, uvHeight, nTextureSizeMultiple); // 2. 
创建单个纹理图集 Mesh::Image8U3Arr textures; Image8U3& textureAtlas = textures.emplace_back(textureSize, textureSize); textureAtlas.setTo(cv::Scalar(colEmpty.b, colEmpty.g, colEmpty.r)); DEBUG_EXTRA("Creating texture atlas: %dx%d, UV bounds: [%.3f,%.3f]-[%.3f,%.3f]", textureSize, textureSize, uvBounds.ptMin.x(), uvBounds.ptMin.y(), uvBounds.ptMax.x(), uvBounds.ptMax.y()); // 3. 为每个面采样颜色 int processedFaces = 0; int failedFaces = 0; #ifdef _USE_OPENMP #pragma omp parallel for schedule(dynamic) reduction(+:processedFaces, failedFaces) #endif for (int_t idxFace = 0; idxFace < (int_t)scene.mesh.faces.size(); ++idxFace) { const FIndex faceID = (FIndex)idxFace; const Face& face = scene.mesh.faces[faceID]; // 获取模型原始UV坐标 const TexCoord* modelUVs = &scene.mesh.faceTexcoords[faceID * 3]; // 获取对应source纹理中的UV坐标 const TexCoord* sourceUVs = &sourceTexcoords[faceID * 3]; const TexIndex textureIdx = sourceTexindices.empty() ? 0 : sourceTexindices[faceID]; if (textureIdx >= sourceTextures.size()) { failedFaces++; continue; } const Image8U3& sourceTexture = sourceTextures[textureIdx]; // 计算面片在目标纹理中的UV边界 AABB2f faceUVBounds(true); for (int i = 0; i < 3; ++i) { // 从模型UV映射到纹理坐标 const float u = (modelUVs[i].x - uvBounds.ptMin.x()) / uvWidth; const float v = (modelUVs[i].y - uvBounds.ptMin.y()) / uvHeight; faceUVBounds.InsertFull(Point2f(u, v)); } // 转换为像素坐标 const int startX = std::max(0, (int)(faceUVBounds.ptMin.x() * textureSize)); const int startY = std::max(0, (int)(faceUVBounds.ptMin.y() * textureSize)); const int endX = std::min(textureSize - 1, (int)(faceUVBounds.ptMax.x() * textureSize)); const int endY = std::min(textureSize - 1, (int)(faceUVBounds.ptMax.y() * textureSize)); if (startX > endX || startY > endY) { failedFaces++; continue; } // 对面片覆盖的每个像素进行采样 for (int y = startY; y <= endY; ++y) { for (int x = startX; x <= endX; ++x) { const Point2f texCoord((float)x / textureSize, (float)y / textureSize); // 1. 
检查是否在模型UV三角形内 Point3f barycentric; if (!PointInTriangle(texCoord, Point2f((modelUVs[0].x - uvBounds.ptMin.x()) / uvWidth, (modelUVs[0].y - uvBounds.ptMin.y()) / uvHeight), Point2f((modelUVs[1].x - uvBounds.ptMin.x()) / uvWidth, (modelUVs[1].y - uvBounds.ptMin.y()) / uvHeight), Point2f((modelUVs[2].x - uvBounds.ptMin.x()) / uvWidth, (modelUVs[2].y - uvBounds.ptMin.y()) / uvHeight), barycentric)) { continue; } // 2. 使用相同的重心坐标在source纹理UV中插值 Point2f sourceTexCoord( sourceUVs[0].x * barycentric.x + sourceUVs[1].x * barycentric.y + sourceUVs[2].x * barycentric.z, sourceUVs[0].y * barycentric.x + sourceUVs[1].y * barycentric.y + sourceUVs[2].y * barycentric.z ); // 确保UV坐标在有效范围内 if (sourceTexCoord.x < 0 || sourceTexCoord.x >= sourceTexture.cols || sourceTexCoord.y < 0 || sourceTexCoord.y >= sourceTexture.rows) { continue; } // 3. 从source纹理中采样颜色 const cv::Vec3b color = sourceTexture.at( (int)sourceTexCoord.y, (int)sourceTexCoord.x); // 4. 写入目标纹理 textureAtlas.at(y, x) = color; } } processedFaces++; } DEBUG_EXTRA("Texture sampling completed: %d faces processed, %d faces failed", processedFaces, failedFaces); // 4. 填充纹理空隙 if (processedFaces > 0) { // FillTextureGaps(textureAtlas, scene.mesh.faceTexcoords, (FIndex)scene.mesh.faces.size(), textureSize, colEmpty); } // 5. 应用锐化 if (fSharpnessWeight > 0) { // ApplySharpening(textureAtlas, fSharpnessWeight); } DEBUG_EXTRA("Generated texture atlas: %dx%d from %zu source textures", textureSize, textureSize, sourceTextures.size()); return textures; } bool MeshTexture::ValidateProjection(const Vertex& worldPoint, const Image& sourceImage, Point2f imgPoint, float maxReprojectionError) { // 1. 前向投影:3D点 → 2D图像坐标 Point2f projectedPoint = sourceImage.camera.ProjectPointP(worldPoint); // 2. 计算重投影误差 float reprojectionError = norm(projectedPoint - imgPoint); // 3. 设置误差阈值 if (reprojectionError > maxReprojectionError) { DEBUG_EXTRA("重投影误差过大: %.3f像素,跳过该采样点", reprojectionError); return false; } // 4. 
视线方向一致性检查 if (!sourceImage.camera.IsInFront(worldPoint)) { DEBUG_EXTRA("点位于相机后方,跳过"); return false; } return true; } Pixel8U MeshTexture::SampleImageBilinear(const Image8U3& image, const Point2f& point) { const int x1 = (int)point.x; const int y1 = (int)point.y; const int x2 = std::min(x1 + 1, image.cols - 1); const int y2 = std::min(y1 + 1, image.rows - 1); const float dx = point.x - x1; const float dy = point.y - y1; const Pixel8U& p11 = image(y1, x1); const Pixel8U& p12 = image(y1, x2); const Pixel8U& p21 = image(y2, x1); const Pixel8U& p22 = image(y2, x2); Pixel8U result; for (int i = 0; i < 3; ++i) { result[i] = (uint8_t)( p11[i] * (1-dx)*(1-dy) + p12[i] * dx*(1-dy) + p21[i] * (1-dx)*dy + p22[i] * dx*dy ); } return result; } void MeshTexture::ProjectFaceToTexture(FIndex faceID, IIndex viewID, const TexCoord* uv, Image8U3& texture) { // DEBUG_EXTRA("ProjectFaceToTexture 1"); const Image& image = images[viewID]; const Face& face = scene.mesh.faces[faceID]; // 计算面的包围盒(在纹理空间中) Point2f minUV(FLT_MAX, FLT_MAX), maxUV(FLT_MIN, FLT_MIN); for (int i = 0; i < 3; ++i) { minUV.x = MIN(minUV.x, uv[i].x); minUV.y = MIN(minUV.y, uv[i].y); maxUV.x = MAX(maxUV.x, uv[i].x); maxUV.y = MAX(maxUV.y, uv[i].y); } // 将UV坐标转换到纹理像素坐标 const int texWidth = texture.cols; const int texHeight = texture.rows; const int startX = MAX(0, (int)(minUV.x * texWidth)); const int startY = MAX(0, (int)(minUV.y * texHeight)); const int endX = MIN(texWidth - 1, (int)(maxUV.x * texWidth)); const int endY = MIN(texHeight - 1, (int)(maxUV.y * texHeight)); // 对纹理中的每个像素进行采样 for (int y = startY; y <= endY; ++y) { for (int x = startX; x <= endX; ++x) { // 将像素坐标转换回UV坐标 const Point2f texCoord((float)x / texWidth, (float)y / texHeight); // DEBUG_EXTRA("ProjectFaceToTexture1 %d", x); // 检查点是否在三角形内[6,8](@ref) Point3f bary; if (!PointInTriangle(texCoord, uv[0], uv[1], uv[2], bary)) { continue; } // DEBUG_EXTRA("ProjectFaceToTexture2 %d", x); // 计算3D空间中的对应点 const Vertex worldPoint = vertices[face[0]] * bary.x + 
vertices[face[1]] * bary.y + vertices[face[2]] * bary.z; // 将3D点投影到图像 Point2f imgPoint; // 确保 worldPoint 的类型与相机参数匹配 const TPoint3 worldPointDouble( static_cast(worldPoint.x), static_cast(worldPoint.y), static_cast(worldPoint.z) ); imgPoint = image.camera.ProjectPoint(worldPointDouble); // 添加有效性检查 if (!image.camera.IsInFront(worldPoint)) { continue; // 点不在相机前方,跳过 } // DEBUG_EXTRA("ProjectFaceToTexture3 %d", x); // 检查投影点是否在图像范围内 if (!image.camera.IsInside(imgPoint, Point2f(image.width, image.height))) { continue; // 点不在图像内,跳过 } // DEBUG_EXTRA("ProjectFaceToTexture4 %d", x); // 检查投影点是否在图像范围内 // 从图像中采样颜色(使用双线性插值) if (image.image.isInside(imgPoint)) { // 获取图像中对应点的颜色[1,4](@ref) const int imgX = (int)imgPoint.x; const int imgY = (int)imgPoint.y; // 边界检查 if (imgX >= 0 && imgX < image.image.cols - 1 && imgY >= 0 && imgY < image.image.rows - 1) { // 双线性插值参数 const float dx = imgPoint.x - imgX; const float dy = imgPoint.y - imgY; const float w1 = (1 - dx) * (1 - dy); const float w2 = dx * (1 - dy); const float w3 = (1 - dx) * dy; const float w4 = dx * dy; // 获取四个相邻像素的颜色 const Pixel8U& p1 = image.image(imgY, imgX); const Pixel8U& p2 = image.image(imgY, imgX + 1); const Pixel8U& p3 = image.image(imgY + 1, imgX); const Pixel8U& p4 = image.image(imgY + 1, imgX + 1); // 计算加权平均颜色 Pixel8U finalColor; finalColor.r = (uint8_t)(p1.r * w1 + p2.r * w2 + p3.r * w3 + p4.r * w4); finalColor.g = (uint8_t)(p1.g * w1 + p2.g * w2 + p3.g * w3 + p4.g * w4); finalColor.b = (uint8_t)(p1.b * w1 + p2.b * w2 + p3.b * w3 + p4.b * w4); // DEBUG_EXTRA("ProjectFaceToTexture5 finalColor=(%d,%d,%d)", finalColor.r, finalColor.g, finalColor.b); // 将颜色赋给纹理像素[1](@ref) texture(y, x) = finalColor; } else { // 边界情况:使用最近邻插值 const int safeX = std::min(std::max(0, (int)imgPoint.x), image.image.cols - 1); const int safeY = std::min(std::max(0, (int)imgPoint.y), image.image.rows - 1); texture(y, x) = image.image(safeY, safeX); // DEBUG_EXTRA("ProjectFaceToTexture6 %d", x); } } } } } #include /** * @brief 
使用重心坐标法判断点是否在三角形内,并计算重心坐标 * @param p 需要判断的点(纹理坐标空间中的点) * @param a 三角形的第一个顶点 * @param b 三角形的第二个顶点 * @param c 三角形的第三个顶点 * @param bary 输出的重心坐标 (1-u-v, u, v) * @return bool 如果点在三角形内返回true,否则返回false */ bool MeshTexture::PointInTriangle(const Point2f& p, const Point2f& a, const Point2f& b, const Point2f& c, Point3f& bary) { // 添加调试输出 // DEBUG_EXTRA("PointInTriangle - Input: p(%.6f,%.6f), a(%.6f,%.6f), b(%.6f,%.6f), c(%.6f,%.6f)", // p.x, p.y, a.x, a.y, b.x, b.y, c.x, c.y); // 检查输入有效性 if (!std::isfinite(p.x) || !std::isfinite(p.y) || !std::isfinite(a.x) || !std::isfinite(a.y) || !std::isfinite(b.x) || !std::isfinite(b.y) || !std::isfinite(c.x) || !std::isfinite(c.y)) { // DEBUG_EXTRA("PointInTriangle - Invalid input coordinates"); return false; } // 计算边向量 Point2f v0 = b - a; Point2f v1 = c - a; Point2f v2 = p - a; // 计算必要的点积 float dot00 = v0.x * v0.x + v0.y * v0.y; float dot01 = v0.x * v1.x + v0.y * v1.y; float dot02 = v0.x * v2.x + v0.y * v2.y; float dot11 = v1.x * v1.x + v1.y * v1.y; float dot12 = v1.x * v2.x + v1.y * v2.y; // 计算分母(三角形平行四边形面积的两倍) float denom = dot00 * dot11 - dot01 * dot01; // 处理退化三角形情况(面积接近0) const float epsilon = 1e-10f; if (std::abs(denom) < epsilon) { // DEBUG_EXTRA("PointInTriangle - Degenerate triangle, denom=%.10f", denom); // return false; } // 计算重心坐标参数 float invDenom = 1.0f / denom; float u = (dot11 * dot02 - dot01 * dot12) * invDenom; float v = (dot00 * dot12 - dot01 * dot02) * invDenom; // DEBUG_EXTRA("PointInTriangle - u=%.6f, v=%.6f, u+v=%.6f", u, v, u+v); // 检查点是否在三角形内(使用更宽松的容差) if (u >= -epsilon && v >= -epsilon && (u + v) <= 1.0f + epsilon) { // 点在三角形内,计算完整的重心坐标 bary.x = 1.0f - u - v; bary.y = u; bary.z = v; // DEBUG_EXTRA("PointInTriangle - Point INSIDE triangle, bary(%.3f,%.3f,%.3f)", bary.x, bary.y, bary.z); return true; } // DEBUG_EXTRA("PointInTriangle - Point OUTSIDE triangle"); return false; } // 辅助函数:计算最佳纹理尺寸 int MeshTexture::ComputeOptimalTextureSize(float uvWidth, float uvHeight, unsigned multiple) { // 计算所需尺寸 int baseWidth = 
(int)ceil(uvWidth * 2048); // 假设基础分辨率 int baseHeight = (int)ceil(uvHeight * 2048); // 向上对齐到multiple的倍数 int width = ((baseWidth + multiple - 1) / multiple) * multiple; int height = ((baseHeight + multiple - 1) / multiple) * multiple; // 确保最小尺寸 width = std::max(width, 256); height = std::max(height, 256); // 使用最大尺寸 int size = std::max(width, height); // 限制最大尺寸 const int MAX_SIZE = 8192; if (size > MAX_SIZE) { DEBUG_EXTRA("Warning: Texture size %d exceeds maximum %d, reducing...", size, MAX_SIZE); size = std::min(size, MAX_SIZE); } return size; } // 保存遮挡数据到文件 void Scene::SaveVisibleFacesData(std::map>& visible_faces_map, std::unordered_set& face_visible_relative, std::map>& edge_faces_map, std::map>& delete_edge_faces_map, std::string& basePath) { // 保存 visible_faces_map std::ofstream mapFile(basePath + "_visible_faces_map.txt"); if (mapFile.is_open()) { for (const auto& entry : visible_faces_map) { mapFile << entry.first; for (int face : entry.second) { mapFile << " " << face; } mapFile << "\n"; } mapFile.close(); } // 保存 face_visible_relative std::ofstream relativeFile(basePath + "_face_visible_relative.txt"); if (relativeFile.is_open()) { for (int face : face_visible_relative) { relativeFile << face << "\n"; } relativeFile.close(); } std::ofstream mapFile2(basePath + "_edge_faces_map.txt"); if (mapFile2.is_open()) { for (const auto& entry : edge_faces_map) { mapFile2 << entry.first; for (int face : entry.second) { mapFile2 << " " << face; } mapFile2 << "\n"; } mapFile2.close(); } std::ofstream mapFile3(basePath + "_delete_edge_faces_map.txt"); if (mapFile3.is_open()) { for (const auto& entry : delete_edge_faces_map) { mapFile3 << entry.first; for (int face : entry.second) { mapFile3 << " " << face; } mapFile3 << "\n"; } mapFile3.close(); } } // 从文件加载遮挡数据 bool Scene::LoadVisibleFacesData(std::map>& visible_faces_map, std::unordered_set& face_visible_relative, std::map>& edge_faces_map, std::map>& delete_edge_faces_map, std::string& basePath) { 
printf("LoadVisibleFacesData %s\n", basePath.c_str());
	// Per-image visible-face map: each line is "<image_name> <face_idx> <face_idx> ...".
	std::ifstream mapFile(basePath + "_visible_faces_map.txt");
	if (!mapFile.is_open()) {
		return false;
	}
	std::string line;
	while (std::getline(mapFile, line)) {
		std::istringstream iss(line);
		std::string image_name;
		iss >> image_name;
		// NOTE(review): the element type of this set appears to have been lost
		// (presumably std::unordered_set<int>, given face_index below) — confirm.
		std::unordered_set faces;
		int face_index;
		while (iss >> face_index) {
			faces.insert(face_index);
		}
		visible_faces_map[image_name] = faces;
	}
	mapFile.close();
	// Global relative-visibility set: one face index per line.
	std::ifstream relativeFile(basePath + "_face_visible_relative.txt");
	if (!relativeFile.is_open()) {
		return false;
	}
	while (std::getline(relativeFile, line)) {
		int face_index = std::stoi(line);
		face_visible_relative.insert(face_index);
	}
	relativeFile.close();
	// Per-image edge-face map, same line format as the visible-face map.
	std::ifstream mapFile2(basePath + "_edge_faces_map.txt");
	if (!mapFile2.is_open()) {
		return false;
	}
	while (std::getline(mapFile2, line)) {
		std::istringstream iss(line);
		std::string image_name;
		iss >> image_name;
		std::unordered_set faces;
		int face_index;
		while (iss >> face_index) {
			faces.insert(face_index);
		}
		edge_faces_map[image_name] = faces;
	}
	mapFile2.close();
	// Per-image map of edge faces to be deleted, same line format as above.
	std::ifstream mapFile3(basePath + "_delete_edge_faces_map.txt");
	if (!mapFile3.is_open()) {
		return false;
	}
	while (std::getline(mapFile3, line)) {
		std::istringstream iss(line);
		std::string image_name;
		iss >> image_name;
		std::unordered_set faces;
		int face_index;
		while (iss >> face_index) {
			faces.insert(face_index);
		}
		delete_edge_faces_map[image_name] = faces;
	}
	mapFile3.close();
	return true;
}

// texture mesh
// - minCommonCameras: generate texture patches using virtual faces composed of coplanar triangles sharing at least this number of views (0 - disabled, 3 - good value)
// - fSharpnessWeight: sharpness weight to be applied on the texture (0 - disabled, 0.5 - good value)
// - nIgnoreMaskLabel: label value to ignore in the image mask, stored in the MVS scene or next to each image with '.mask.png' extension (-1 - auto estimate mask for lens distortion, -2 - disabled)
bool Scene::TextureMesh(unsigned nResolutionLevel, unsigned nMinResolution, unsigned minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, bool bGlobalSeamLeveling, bool bLocalSeamLeveling, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, float fSharpnessWeight, int nIgnoreMaskLabel, int maxTextureSize, const IIndexArr& views, const SEACAVE::String& baseFileName, bool bOriginFaceview, const std::string& inputFileName, const std::string& meshFileName, bool bUseExistingUV, const std::string& strUVMeshFileName)
{
	// NOTE(review): the caller-supplied value is overridden here unconditionally — confirm intentional.
	nTextureSizeMultiple = 8192; // 8192 4096
	// if (!bOriginFaceview && !bUseExistingUV)
	if (!bOriginFaceview) {
		// Make sure the mesh topology has been computed.
		if (mesh.faceFaces.empty()) {
			mesh.ListIncidenteFaces(); // compute vertex-face adjacency
			mesh.ListIncidenteFaceFaces(); // compute face-face adjacency
		}
		// Make sure the face normals have been computed.
		if (mesh.faceNormals.empty()) {
			mesh.ComputeNormalFaces(); // compute face normals
		}
		// Make sure the vertex boundary information has been computed.
		if (mesh.vertexBoundary.empty()) {
			mesh.ListBoundaryVertices(); // compute boundary vertices
		}
		Mesh::FaceIdxArr regionMap;
		this->SegmentMeshBasedOnCurvature(regionMap, 0.2f); // curvature threshold set to 0.2
	}
	MeshTexture texture(*this, nResolutionLevel, nMinResolution);
	// printf("baseFileName=%s\n", baseFileName.c_str());
	/*
	std::filesystem::path path(baseFileName.c_str());
	std::string parentPath = path.parent_path().string(); // get the parent directory
	String altTexPath = String(parentPath) + "/mesh_material_0_map_Kd2.png";
	printf("altTexPath=%s\n", altTexPath.c_str());
	// load the alternative texture
	Image8U3 altTex;
	if (!altTex.Load(altTexPath)) {
		// if loading fails, warn but do not interrupt the flow
		DEBUG_EXTRA("Warning: Failed to load alternative texture mesh_material_0_map_Kd2.png");
	} else {
		texture.alternativeTexture = &altTex;
	}
	//*/
	// Extract the dataset id: the path component between the third-to-last
	// and second-to-last '/' of baseFileName (with fallbacks for short paths).
	std::string id;
	// 1. find the position of the last '/'
	size_t last_slash = baseFileName.rfind('/');
	if (last_slash == std::string::npos) {
		id = baseFileName; // no slash: use the whole string
	} else {
		// 2. find the position of the second-to-last '/' (before last_slash)
		size_t second_last_slash = baseFileName.rfind('/', last_slash - 1);
		if (second_last_slash == std::string::npos) {
			id = baseFileName.substr(0, last_slash); // fewer than two slashes: content before the first slash
		} else {
			// 3. find the position of the third-to-last '/' (before second_last_slash)
			size_t third_last_slash = baseFileName.rfind('/', second_last_slash - 1);
			if (third_last_slash == std::string::npos) {
				id = baseFileName.substr(0, second_last_slash); // fewer than three slashes: from the start to the second-to-last slash
			} else {
				// 4. take the substring between the third-to-last and second-to-last slashes
				id = baseFileName.substr(third_last_slash + 1, second_last_slash - third_last_slash - 1);
			}
		}
	}
	printf("id=%s\n", id.c_str());
#ifdef MASK_FACE_OCCLUSION
	//*
	fs::path p(baseFileName.c_str());
	// 2. get the parent path (e.g., /path/to/data/scene)
	fs::path parent = p.parent_path();
	// 4. convert to string and append a path separator
	// (appending / "" handles it automatically, ensuring /path/to/data becomes /path/to/data/)
	std::string basePath = (parent / "").string();
	//*/
	/*
	std::string basePath = "";
	size_t lastSlash = baseFileName.find_last_of('/');
	size_t secondLastSlash = baseFileName.find_last_of('/', lastSlash - 1);
	if (secondLastSlash == std::string::npos)
		basePath = baseFileName;
	basePath = baseFileName.substr(0, secondLastSlash + 1);
	/*/
	// printf("basePath=%s\n", basePath.c_str());
	// Best-effort load of the precomputed visibility/edge maps; failure only logs.
	if (!LoadVisibleFacesData(visible_faces_map, face_visible_relative, edge_faces_map, delete_edge_faces_map, basePath)) {
		printf("LoadVisibleFacesData error\n");
	}
#endif
	// assign the best view to each face
	{
		TD_TIMER_STARTD();
		if (bOriginFaceview) {
			if (!texture.FaceViewSelection(minCommonCameras, fOutlierThreshold, fRatioDataSmoothness, nIgnoreMaskLabel, views))
				return false;
		} else {
			// if (!texture.FaceViewSelection3(minCommonCameras, fOutlierThreshold, fRatioDataSmoothness, nIgnoreMaskLabel, views))
			// return false;
			// First pass: use virtual-face mapping, skipping invalid faces.
			// if (false)
			{
				TD_TIMER_STARTD();
				if (!texture.FaceViewSelection3(minCommonCameras, fOutlierThreshold, fRatioDataSmoothness, nIgnoreMaskLabel, views, bUseExistingUV))
					return false;
				DEBUG_EXTRA("First pass (virtual faces) completed: %u faces (%s)", mesh.faces.size(), TD_TIMER_GET_FMT().c_str());
			}
			// Second pass: specifically handle invalid faces (non-virtual-face mapping).
			// NOTE(review): currently disabled via if (false).
			if (false) {
				TD_TIMER_STARTD();
				// collect the indices of all invalid faces
				Mesh::FaceIdxArr invalidFaces;
				for (FIndex fid = 0; fid < mesh.faces.size(); ++fid) {
					if (texture.scene.mesh.invalidFaces.data.contains(fid)) {
						invalidFaces.push_back(fid);
					}
				}
				if (!invalidFaces.empty()) {
					// create a new texture processor dedicated to the invalid faces
					MeshTexture textureInvalid(*this, nResolutionLevel, nMinResolution);
					// process the invalid faces in non-virtual-face mode
					if (!textureInvalid.FaceViewSelection4(0, fOutlierThreshold, fRatioDataSmoothness, nIgnoreMaskLabel, views, &invalidFaces)) {
						return false;
					}
					// merge the results of the two passes
					texture.texturePatches.Join(textureInvalid.texturePatches);
					texture.seamEdges.Join(textureInvalid.seamEdges);
				}
				DEBUG_EXTRA("Second pass (invalid faces) completed: %u faces (%s)", invalidFaces.size(), TD_TIMER_GET_FMT().c_str());
			}
		}
		DEBUG_EXTRA("Assigning the best view to each face completed: %u faces (%s)", mesh.faces.size(), TD_TIMER_GET_FMT().c_str());
	}
	// mesh.CheckUVValid();
	DEBUG_EXTRA("TextureMesh %b, %s", bUseExistingUV, strUVMeshFileName.c_str());
	// Precomputed-UV path: texture with view consistency, then re-bake onto the
	// UV layout of an externally supplied mesh.
	if (bUseExistingUV && !strUVMeshFileName.empty()) {
		// 1. generate the temporary texture and UVs
		Mesh::TexCoordArr existingTexcoords;
		Mesh::TexIndexArr existingTexindices;
		// texture.GenerateTextureForUV(bGlobalSeamLeveling, bLocalSeamLeveling, nTextureSizeMultiple,
		// nRectPackingHeuristic, colEmpty, fSharpnessWeight, maxTextureSize,
		// baseFileName, bOriginFaceview, this, existingTexcoords, existingTexindices);
		if (!texture.GenerateTextureWithViewConsistency(
				bGlobalSeamLeveling, bLocalSeamLeveling, nTextureSizeMultiple,
				nRectPackingHeuristic, colEmpty, fSharpnessWeight, maxTextureSize,
				baseFileName, bOriginFaceview, this)) {
			return false;
		}
		// keep the generated texture data
		Mesh::Image8U3Arr existingTextures = texture.texturesDiffuseTemp;
		VERBOSE("1faceTexcoords.size=%d, faces.size=%d", mesh.faceTexcoords.size(), mesh.faces.size() * 3);
		// 2. load the mesh using the precomputed-UV mode
		if (!mesh.Load(MAKE_PATH_SAFE(strUVMeshFileName), true)) {
			VERBOSE("error: cannot load mesh file with UV coordinates");
			return false;
		}
		VERBOSE("2faceTexcoords.size=%d, faces.size=%d", mesh.faceTexcoords.size(), mesh.faces.size() * 3);
		if (mesh.faceTexcoords.empty()) {
			VERBOSE("error: the specified mesh does not contain UV coordinates");
			return false;
		}
		// 3. use the new texture-generation method
		MeshTexture texture2(*this, nResolutionLevel, nMinResolution);
		// important: make sure the geometry information is correct
		texture2.scene.mesh.vertices = mesh.vertices;
		texture2.scene.mesh.faces = mesh.faces;
		texture2.scene.mesh.faceTexcoords = mesh.faceTexcoords; // use the external UVs
		if (!texture2.TextureWithExistingUV(views, nIgnoreMaskLabel, fOutlierThreshold, nTextureSizeMultiple, colEmpty, fSharpnessWeight, existingTextures, existingTexcoords, existingTexindices)) {
			return false;
		}
		// 4. assign the generated texture back to the scene mesh
		mesh.texturesDiffuse = std::move(texture2.scene.mesh.texturesDiffuse);
		mesh.faceTexindices = std::move(texture2.scene.mesh.faceTexindices);
		return true;
	}
	// mesh.CheckUVValid();
	// generate the texture image and atlas
	{
		TD_TIMER_STARTD();
		texture.GenerateTexture(bGlobalSeamLeveling, bLocalSeamLeveling, nTextureSizeMultiple, nRectPackingHeuristic, colEmpty, fSharpnessWeight, maxTextureSize, baseFileName, bOriginFaceview, this);
		DEBUG_EXTRA("Generating texture atlas and image completed: %u patches, %u image size, %u textures (%s)", texture.texturePatches.size(), mesh.texturesDiffuse[0].width(), mesh.texturesDiffuse.size(), TD_TIMER_GET_FMT().c_str());
	}
	// mesh.CheckUVValid();
	return true;
} // TextureMesh

// Run the given shell command via popen and capture its stdout into a string.
// Throws std::runtime_error if the pipe cannot be opened.
// NOTE(review): the template arguments of std::array and std::unique_ptr appear
// to have been lost in this copy (presumably std::array<char, N> and a
// unique_ptr with a pclose deleter) — restore before compiling.
std::string Scene::runPython(const std::string& command) {
	std::array buffer{};
	std::string result;
	std::unique_ptr pipe(popen(command.c_str(), "r"), pclose);
	if (!pipe)
		throw std::runtime_error("popen() failed!");
	while (fgets(buffer.data(), buffer.size(), pipe.get()) != nullptr) {
		result += buffer.data();
	}
	return result;
}

// Returns true if face_index is visible in the given image according to the
// per-image visibility map loaded by LoadVisibleFacesData; always true when
// occlusion masking is disabled at compile time.
bool Scene::is_face_visible(const std::string& image_name, int face_index) {
#ifndef MASK_FACE_OCCLUSION
	return
true; #endif auto it = visible_faces_map.find(image_name); if (it != visible_faces_map.end()) { return it->second.find(face_index) != it->second.end(); } return false; } bool Scene::is_face_visible_relative(int face_index) { #ifndef MASK_FACE_OCCLUSION return true; #endif return face_visible_relative.contains(face_index); } bool Scene::is_face_edge(const std::string& image_name, int face_index) { #ifndef MASK_FACE_OCCLUSION return true; #endif auto it = edge_faces_map.find(image_name); if (it != edge_faces_map.end()) { // printf("is_face_edge %s, %d, %d\n", image_name.c_str(), it->second.size(), face_index); for (auto it2 = it->second.begin(); it2 != it->second.end(); ++it2) { // std::cout << *it2 << " "; } // std::cout << std::endl; // if (it->second.find(face_index) != it->second.end()) // printf("find is_face_edge %s, %d, %d\n", image_name.c_str(), it->second.size(), face_index); return it->second.find(face_index) != it->second.end(); } return false; } bool Scene::is_face_delete_edge(const std::string& image_name, int face_index) { #ifndef MASK_FACE_OCCLUSION return true; #endif auto it = delete_edge_faces_map.find(image_name); if (it != delete_edge_faces_map.end()) { // printf("is_face_delete_edge %s, %d, %d\n", image_name.c_str(), it->second.size(), face_index); for (auto it2 = it->second.begin(); it2 != it->second.end(); ++it2) { // std::cout << *it2 << " "; } // std::cout << std::endl; // if (it->second.find(face_index) != it->second.end()) // printf("find is_face_delete_edge %s, %d, %d\n", image_name.c_str(), it->second.size(), face_index); return it->second.find(face_index) != it->second.end(); } return false; } void Scene::SegmentMeshBasedOnCurvature(Mesh::FaceIdxArr& regionMap, float curvatureThreshold) { // 确保网格数据有效 if (mesh.faces.empty() || mesh.vertices.empty() || mesh.faceFaces.size() != mesh.faces.size() || mesh.faceNormals.size() != mesh.faces.size()) { regionMap.resize(mesh.faces.size()); regionMap.Memset(0); // 默认全部设为区域0 return; } 
regionMap.resize(mesh.faces.size()); const size_t numFaces = mesh.faces.size(); for (size_t fid = 0; fid < numFaces; ++fid) { // 检查面索引是否有效 if (fid >= mesh.faces.size()) continue; const Mesh::Face& f = mesh.faces[fid]; // 检查顶点索引是否有效 if (f[0] >= mesh.vertices.size() || f[1] >= mesh.vertices.size() || f[2] >= mesh.vertices.size()) { regionMap[fid] = 0; continue; } const Mesh::Vertex v0 = mesh.vertices[f[0]]; const Mesh::Vertex v1 = mesh.vertices[f[1]]; const Mesh::Vertex v2 = mesh.vertices[f[2]]; // 计算面法向量 const Mesh::Vertex edge1 = v1 - v0; const Mesh::Vertex edge2 = v2 - v0; Mesh::Normal normal; normal.x = edge1.y * edge2.z - edge1.z * edge2.y; normal.y = edge1.z * edge2.x - edge1.x * edge2.z; normal.z = edge1.x * edge2.y - edge1.y * edge2.x; const float length = std::sqrt(normal.x * normal.x + normal.y * normal.y + normal.z * normal.z); if (length > FLT_EPSILON) { normal.x /= length; normal.y /= length; normal.z /= length; } float curvature = 0.0f; int count = 0; // 检查面邻接关系是否有效 if (fid < mesh.faceFaces.size()) { for (int i = 0; i < 3; ++i) { const Mesh::FaceFaces::Type adjFaceIdx = mesh.faceFaces[fid][i]; if (adjFaceIdx == NO_ID) continue; // 检查邻接面索引是否有效 if (adjFaceIdx < mesh.faceNormals.size()) { const Mesh::Normal& adjNormal = mesh.faceNormals[adjFaceIdx]; const float dot = normal.x * adjNormal.x + normal.y * adjNormal.y + normal.z * adjNormal.z; curvature += 1.0f - std::abs(dot); count++; } } } if (count > 0) { curvature /= count; } regionMap[fid] = (curvature > curvatureThreshold) ? 1 : 0; } } /*----------------------------------------------------------------*/