/*
* SceneTexture.cpp
*
* Copyright (c) 2014-2015 SEACAVE
*
* Author(s):
*
*      cDc <cdc.seacave@gmail.com>
*
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* Additional Terms:
*
*      You are required to preserve legal notices and author attributions in
*      that material or in the Appropriate Legal Notices displayed by works
*      containing it.
*/

#include "Common.h"
#include "Scene.h"
#include "RectsBinPack.h"
// connected components
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/connected_components.hpp>
#include <opencv2/ximgproc.hpp>

// #include <pybind11/embed.h>
// #include <pybind11/stl.h>
#include "ConfigEnv.h"
#include "cuda/MeshTextureCUDA.h"
#include <sstream>
#include <filesystem>

namespace fs = std::filesystem;

// namespace py = pybind11;

using namespace MVS;

// D E F I N E S ///////////////////////////////////////////////////

// uncomment to enable multi-threading based on OpenMP
#ifdef _USE_OPENMP
#define TEXOPT_USE_OPENMP
#endif

// uncomment to use SparseLU for solving the linear systems
// (should be faster, but not working on old Eigen)
#if !defined(EIGEN_DEFAULT_TO_ROW_MAJOR) || EIGEN_WORLD_VERSION>3 || (EIGEN_WORLD_VERSION==3 && EIGEN_MAJOR_VERSION>2)
#define TEXOPT_SOLVER_SPARSELU
#endif

// method used to try to detect outlier face views
// (should enable more consistent textures, but it is not working)
#define TEXOPT_FACEOUTLIER_NA 0
#define TEXOPT_FACEOUTLIER_MEDIAN 1
#define TEXOPT_FACEOUTLIER_GAUSS_DAMPING 2
#define TEXOPT_FACEOUTLIER_GAUSS_CLAMPING 3
#define TEXOPT_FACEOUTLIER TEXOPT_FACEOUTLIER_GAUSS_CLAMPING

// method used to find optimal view per face
#define TEXOPT_INFERENCE_LBP 1
#define TEXOPT_INFERENCE_TRWS 2
#define TEXOPT_INFERENCE TEXOPT_INFERENCE_LBP
#define MASK_FACE_OCCLUSION
// #define USE_CUDA

// inference algorithm
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP
#include "../Math/LBP.h"
namespace MVS {
typedef LBPInference::NodeID NodeID;
// Potts model as smoothness function
LBPInference::EnergyType STCALL SmoothnessPotts(LBPInference::NodeID, LBPInference::NodeID, LBPInference::LabelID l1, LBPInference::LabelID l2) {
	return l1 == l2 && l1 != 0 && l2 != 0 ? LBPInference::EnergyType(0) : LBPInference::EnergyType(LBPInference::MaxEnergy);
}
LBPInference::EnergyType STCALL SmoothnessLinear(LBPInference::NodeID, LBPInference::NodeID, LBPInference::LabelID l1, LBPInference::LabelID l2) {
	return std::abs((int)l1 - (int)l2) * 0.5f * LBPInference::MaxEnergy;
}
LBPInference::EnergyType STCALL NewSmoothness(LBPInference::NodeID, LBPInference::NodeID,
	LBPInference::LabelID l1, LBPInference::LabelID l2) {
	if (l1 == l2) return 0;
	if (l1 == 0 || l2 == 0) return LBPInference::MaxEnergy; // keep the hard constraint against the undefined label
	return LBPInference::EnergyType(0.3f * LBPInference::MaxEnergy); // lower the penalty between two different valid views
}
}
#endif
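
// Note on the smoothness terms above: all three return an energy in [0, LBPInference::MaxEnergy].
// SmoothnessPotts charges the full penalty whenever the two labels differ or either one is the
// undefined label 0; SmoothnessLinear grows with the label distance; NewSmoothness keeps the hard
// penalty only against label 0 and charges a reduced 0.3*MaxEnergy between two different valid
// views. Illustrative values (assuming, hypothetically, MaxEnergy == 1000):
//   NewSmoothness(n0, n1, 3, 3) == 0      // same view
//   NewSmoothness(n0, n1, 3, 0) == 1000   // undefined label: hard penalty
//   NewSmoothness(n0, n1, 3, 7) == 300    // two different valid views: soft penalty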
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_TRWS |
|
#include "../Math/TRWS/MRFEnergy.h" |
|
namespace MVS { |
|
// TRWS MRF energy using Potts model |
|
typedef unsigned NodeID; |
|
typedef unsigned LabelID; |
|
typedef TypePotts::REAL EnergyType; |
|
static const EnergyType MaxEnergy(1); |
|
struct TRWSInference { |
|
typedef MRFEnergy<TypePotts> MRFEnergyType; |
|
typedef MRFEnergy<TypePotts>::Options MRFOptions; |
|
|
|
CAutoPtr<MRFEnergyType> mrf; |
|
CAutoPtrArr<MRFEnergyType::NodeId> nodes; |
|
|
|
inline TRWSInference() {} |
|
void Init(NodeID nNodes, LabelID nLabels) { |
|
mrf = new MRFEnergyType(TypePotts::GlobalSize(nLabels)); |
|
nodes = new MRFEnergyType::NodeId[nNodes]; |
|
} |
|
inline bool IsEmpty() const { |
|
return mrf == NULL; |
|
} |
|
inline void AddNode(NodeID n, const EnergyType* D) { |
|
nodes[n] = mrf->AddNode(TypePotts::LocalSize(), TypePotts::NodeData(D)); |
|
} |
|
inline void AddEdge(NodeID n1, NodeID n2) { |
|
mrf->AddEdge(nodes[n1], nodes[n2], TypePotts::EdgeData(MaxEnergy)); |
|
} |
|
EnergyType Optimize() { |
|
MRFOptions options; |
|
options.m_eps = 0.005; |
|
options.m_iterMax = 1000; |
|
#if 1 |
|
EnergyType lowerBound, energy; |
|
mrf->Minimize_TRW_S(options, lowerBound, energy); |
|
#else |
|
EnergyType energy; |
|
mrf->Minimize_BP(options, energy); |
|
#endif |
|
return energy; |
|
} |
|
inline LabelID GetLabel(NodeID n) const { |
|
return mrf->GetSolution(nodes[n]); |
|
} |
|
}; |
|
} |
|
#endif |
|
|
|
// S T R U C T S /////////////////////////////////////////////////// |
|
|
|
typedef Mesh::Vertex Vertex; |
|
typedef Mesh::VIndex VIndex; |
|
typedef Mesh::Face Face; |
|
typedef Mesh::FIndex FIndex; |
|
typedef Mesh::TexCoord TexCoord; |
|
typedef Mesh::TexIndex TexIndex; |
|
|
|
typedef int MatIdx; |
|
typedef Eigen::Triplet<float,MatIdx> MatEntry; |
|
typedef Eigen::SparseMatrix<float,Eigen::ColMajor,MatIdx> SparseMat; |
|
|
|
enum Mask { |
|
empty = 0, |
|
border = 128, |
|
interior = 255 |
|
}; |
|
|
|
struct MeshTexture { |
|
// used to render the surface to a view camera |
|
typedef TImage<cuint32_t> FaceMap; |
|
struct RasterMesh : TRasterMesh<RasterMesh> { |
|
typedef TRasterMesh<RasterMesh> Base; |
|
FaceMap& faceMap; |
|
FIndex idxFace; |
|
Image8U mask; |
|
bool validFace; |
|
// Mesh _mesh; |
|
Mesh& _mesh; |
|
MeshTexture& meshTexture; |
|
DepthMap& depthMap; |
|
bool bProcessConsist = false; |
|
IIndex _idxView; |
|
|
|
// RasterMesh(MeshTexture& _meshTexture, const Mesh::VertexArr& _vertices, const Camera& _camera, DepthMap& _depthMap, FaceMap& _faceMap, Mesh mesh, bool bProcessConsist) |
|
// : Base(_vertices, _camera, _depthMap), meshTexture(_meshTexture), faceMap(_faceMap), _mesh(mesh), depthMap(_depthMap), bProcessConsist(bProcessConsist){} |
|
RasterMesh(MeshTexture& _meshTexture, const Mesh::VertexArr& _vertices, const Camera& _camera, DepthMap& _depthMap, FaceMap& _faceMap, const std::reference_wrapper<Mesh> meshWrapper, bool bProcessConsist) |
|
: Base(_vertices, _camera, _depthMap), meshTexture(_meshTexture), faceMap(_faceMap), _mesh(meshWrapper.get()), depthMap(_depthMap), bProcessConsist(bProcessConsist){} |
|
void Clear() { |
|
Base::Clear(); |
|
faceMap.memset((uint8_t)NO_ID); |
|
} |
|
void Raster(const ImageRef& pt, const Triangle& t, const Point3f& bary) {
	const Point3f pbary(PerspectiveCorrectBarycentricCoordinates(t, bary));
	const Depth z(ComputeDepth(t, pbary));
	ASSERT(z > Depth(0));
	Depth& depth = depthMap(pt);

	// meshTexture.PerformLocalDepthConsistencyCheck(depthMap, faceMap, _mesh);

	for (int r = 0; r < depthMap.rows; ++r) {
		for (int c = 0; c < depthMap.cols; ++c) {
			// printf("depthMap(r, c)=%f\n", depthMap(r, c));
		}
	}

	{
		// check (under lock) whether this face was already marked invalid;
		// the lock is scoped so the expansion code below can acquire the same mutex again
		std::lock_guard<std::mutex> lock(*_mesh.invalidFaces.mtx);
		if (_mesh.invalidFaces.data.find(idxFace) != _mesh.invalidFaces.data.end()) {
			// validFace = false; // mark this face as invalid
			// return;            // skip rasterizing it
		}
	}

	if (bProcessConsist && false)
	{
		float depthThreshold = 900.0f; // 0.1
		// printf("depthMap(pt)=%f, z=%f\n", depthMap(pt), z);
		// if (std::abs(depthMap(pt))!=0.0)
		//	printf("depthMap(pt)=%f, z=%f\n", depthMap(pt), z);
		// if (depthMap(pt)>600)
		//	printf("depthMap(pt)=%f, z=%f\n", depthMap(pt), z);
		// if (depthMap(pt) != 0 && std::abs(depthMap(pt) - z) > depthThreshold) {
		// if (depthMap(pt) != 0) {
		// if (depthMap(pt) != 0 && std::abs(depthMap(pt) - z) < depthThreshold) {
		// if (depthMap(pt) == 0) {
		if (depth != 0 && std::abs(depth - z) > depthThreshold)
		{
			validFace = false; // mark this face as invalid

			// also mark the neighboring faces as invalid (expand the marked region)
			int expansionRadius = 30; // number of expansion steps around the seed face
			std::queue<FIndex> faceQueue;
			std::unordered_set<FIndex> processedFaces;

			faceQueue.push(idxFace);
			processedFaces.insert(idxFace);
			while (!faceQueue.empty() && expansionRadius-- > 0) {
				FIndex currentFace = faceQueue.front();
				faceQueue.pop();

				// fetch the faces adjacent to the current face
				const Mesh::FaceFaces& adjFaces = _mesh.faceFaces[currentFace];
				for (int i = 0; i < 3; ++i) {
					FIndex neighborFace = adjFaces[i];
					if (neighborFace == NO_ID || processedFaces.find(neighborFace) != processedFaces.end())
						continue;

					// mark the neighboring face as invalid
					processedFaces.insert(neighborFace);
					faceQueue.push(neighborFace);

					// record it in the shared invalid-faces set
					#ifdef TEXOPT_USE_OPENMP
					#pragma omp critical
					#endif
					{
						std::lock_guard<std::mutex> lock(*_mesh.invalidFaces.mtx);
						_mesh.invalidFaces.data.insert(neighborFace);
						// _mesh.invalidFacesAll[_idxView].data.insert(neighborFace);
					}
				}
			}

			return;
		}
	}

	if (depth == 0 || depth > z)
	{
		depth = z;
		// printf("depth=%f\n", depth);
		faceMap(pt) = validFace && (validFace = (mask(pt) != 0)) ? idxFace : NO_ID;
	}
}
|
}; |
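
// How RasterMesh is driven (explanatory sketch, mirroring the code further below): for each view,
// ListCameraFaces() creates a RasterMesh over that view's depth and face maps, sets up the mask,
// calls Clear(), and then Project()s every candidate face; Raster() receives one pixel at a time,
// keeps the closest depth and stores the face index in faceMap only if the pixel passes the mask
// test. Simplified call pattern (not the exact code used below):
//   RasterMesh rasterer(*this, vertices, camera, depthMap, faceMap, std::ref(scene.mesh), false);
//   RasterMesh::Triangle tri;
//   RasterMesh::TriangleRasterizer triRasterizer(tri, rasterer);
//   rasterer.Clear();
//   for (FIndex idxFace : cameraFaces) {
//       rasterer.validFace = true;
//       rasterer.idxFace = idxFace;
//       rasterer.Project(faces[idxFace], triRasterizer);
//   }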
|
|
|
// used to represent a pixel color |
|
typedef Point3f Color; |
|
typedef CLISTDEF0(Color) Colors; |
|
|
|
// cList<DepthMap> viewDepthMaps; // per-view depth maps (disabled)
|
|
|
// used to store info about a face (view, quality) |
|
struct FaceData { |
|
IIndex idxView;// the view seeing this face |
|
float quality; // how well the face is seen by this view |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
Color color; // additionally store mean color (used to remove outliers) |
|
#endif |
|
bool bInvalidFacesRelative = false; |
|
}; |
|
typedef cList<FaceData,const FaceData&,0,8,uint32_t> FaceDataArr; // store information about one face seen from several views |
|
typedef cList<FaceDataArr,const FaceDataArr&,2,1024,FIndex> FaceDataViewArr; // store data for all the faces of the mesh |
|
|
|
typedef cList<Mesh::FaceIdxArr, const Mesh::FaceIdxArr&,2,1024, FIndex> VirtualFaceIdxsArr; // store face indices for each virtual face |
|
|
|
// used to assign a view to a face |
|
typedef uint32_t Label; |
|
typedef cList<Label,Label,0,1024,FIndex> LabelArr; |
|
|
|
// represents a texture patch |
|
struct TexturePatch { |
|
Label label; // view index |
|
Mesh::FaceIdxArr faces; // indices of the faces contained by the patch |
|
RectsBinPack::Rect rect; // the bounding box in the view containing the patch |
|
}; |
|
typedef cList<TexturePatch,const TexturePatch&,1,1024,FIndex> TexturePatchArr; |
|
|
|
// used to optimize texture patches |
|
struct SeamVertex { |
|
struct Patch { |
|
struct Edge { |
|
uint32_t idxSeamVertex; // the other vertex of this edge |
|
FIndex idxFace; // the face containing this edge in this patch |
|
|
|
inline Edge() {} |
|
inline Edge(uint32_t _idxSeamVertex) : idxSeamVertex(_idxSeamVertex) {} |
|
inline bool operator == (uint32_t _idxSeamVertex) const { |
|
return (idxSeamVertex == _idxSeamVertex); |
|
} |
|
}; |
|
typedef cList<Edge,const Edge&,0,4,uint32_t> Edges; |
|
|
|
uint32_t idxPatch; // the patch containing this vertex |
|
Point2f proj; // the projection of this vertex in this patch |
|
Edges edges; // the edges starting from this vertex, contained in this patch (exactly two for manifold meshes) |
|
|
|
inline Patch() {} |
|
inline Patch(uint32_t _idxPatch) : idxPatch(_idxPatch) {} |
|
inline bool operator == (uint32_t _idxPatch) const { |
|
return (idxPatch == _idxPatch); |
|
} |
|
}; |
|
typedef cList<Patch,const Patch&,1,4,uint32_t> Patches; |
|
|
|
VIndex idxVertex; // the index of this vertex |
|
Patches patches; // the patches meeting at this vertex (two or more) |
|
|
|
inline SeamVertex() {} |
|
inline SeamVertex(uint32_t _idxVertex) : idxVertex(_idxVertex) {} |
|
inline bool operator == (uint32_t _idxVertex) const { |
|
return (idxVertex == _idxVertex); |
|
} |
|
Patch& GetPatch(uint32_t idxPatch) { |
|
const uint32_t idx(patches.Find(idxPatch)); |
|
if (idx == NO_ID) |
|
return patches.emplace_back(idxPatch); |
|
return patches[idx]; |
|
} |
|
inline void SortByPatchIndex(IndexArr& indices) const { |
|
indices.resize(patches.size()); |
|
std::iota(indices.Begin(), indices.End(), 0); |
|
std::sort(indices.Begin(), indices.End(), [&](IndexArr::Type i0, IndexArr::Type i1) -> bool { |
|
return patches[i0].idxPatch < patches[i1].idxPatch; |
|
}); |
|
} |
|
}; |
|
typedef cList<SeamVertex,const SeamVertex&,1,256,uint32_t> SeamVertices; |
|
|
|
// used to iterate vertex labels |
|
struct PatchIndex { |
|
bool bIndex; |
|
union { |
|
uint32_t idxPatch; |
|
uint32_t idxSeamVertex; |
|
}; |
|
}; |
|
typedef CLISTDEF0(PatchIndex) PatchIndices; |
|
struct VertexPatchIterator { |
|
uint32_t idx; |
|
uint32_t idxPatch; |
|
const SeamVertex::Patches* pPatches; |
|
inline VertexPatchIterator(const PatchIndex& patchIndex, const SeamVertices& seamVertices) : idx(NO_ID) { |
|
if (patchIndex.bIndex) { |
|
pPatches = &seamVertices[patchIndex.idxSeamVertex].patches; |
|
} else { |
|
idxPatch = patchIndex.idxPatch; |
|
pPatches = NULL; |
|
} |
|
} |
|
inline operator uint32_t () const { |
|
return idxPatch; |
|
} |
|
inline bool Next() { |
|
if (pPatches == NULL) |
|
return (idx++ == NO_ID); |
|
if (++idx >= pPatches->size()) |
|
return false; |
|
idxPatch = (*pPatches)[idx].idxPatch; |
|
return true; |
|
} |
|
}; |
|
|
|
mutable FloatArr meshCurvatures; // per-face curvature values
void ComputeFaceCurvatures() const;

const Image8U3* alternativeTexture; // pointer to an alternative (fallback) texture
|
|
|
// used to sample seam edges |
|
typedef TAccumulator<Color> AccumColor; |
|
typedef Sampler::Linear<float> Sampler; |
|
struct SampleImage { |
|
AccumColor accumColor; |
|
const Image8U3& image; |
|
const Sampler sampler; |
|
|
|
inline SampleImage(const Image8U3& _image) : image(_image), sampler() {} |
|
// sample the edge with linear weights |
|
void AddEdge(const TexCoord& p0, const TexCoord& p1) { |
|
const TexCoord p01(p1 - p0); |
|
const float length(norm(p01)); |
|
ASSERT(length > 0.f); |
|
const int nSamples(ROUND2INT(MAXF(length, 1.f) * 2.f)-1); |
|
AccumColor edgeAccumColor; |
|
for (int s=0; s<nSamples; ++s) { |
|
const float len(static_cast<float>(s) / nSamples); |
|
const TexCoord samplePos(p0 + p01 * len); |
|
const Color color(image.sample<Sampler,Color>(sampler, samplePos)); |
|
edgeAccumColor.Add(RGB2YCBCR(color), 1.f-len); |
|
} |
|
accumColor.Add(edgeAccumColor.Normalized(), length); |
|
} |
|
// returns accumulated color |
|
Color GetColor() const { |
|
return accumColor.Normalized(); |
|
} |
|
}; |
|
|
|
// used to interpolate adjustments color over the whole texture patch |
|
typedef TImage<Color> ColorMap; |
|
|
|
/* |
|
struct ColorF { |
|
float r, g, b, a; |
|
ColorF() : r(0), g(0), b(0), a(0) {} |
|
ColorF(float _r, float _g, float _b, float _a=1.0f) |
|
: r(_r), g(_g), b(_b), a(_a) {} |
|
}; |
|
*/ |
|
|
|
public: |
|
MeshTexture(Scene& _scene, unsigned _nResolutionLevel=0, unsigned _nMinResolution=640); |
|
~MeshTexture(); |
|
|
|
void ListVertexFaces(); |
|
|
|
bool ListCameraFaces(FaceDataViewArr&, float fOutlierThreshold, int nIgnoreMaskLabel, const IIndexArr& views, bool bUseVirtualFaces); |
|
bool CheckInvalidFaces(FaceDataViewArr& facesDatas, float fOutlierThreshold, int nIgnoreMaskLabel, const IIndexArr& _views, bool bUseVirtualFaces); |
|
bool IsFaceVisibleAndValid(const FaceDataArr& faceDatas, const IIndexArr& selectedCams) const; |
|
|
|
std::unordered_set<FIndex> PerformLocalDepthConsistencyCheck(DepthMap& depthMap, FaceMap& faceMap, Mesh& mesh, IIndex idxView, std::string strViewName); |
|
|
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
bool FaceOutlierDetection(FaceDataArr& faceDatas, float fOutlierThreshold) const; |
|
#endif |
|
|
|
void CreateVirtualFaces(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const; |
|
void CreateVirtualFaces2(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras, const Mesh::FaceIdxArr& faceIndices, float thMaxNormalDeviation=25.f) const; |
|
void CreateVirtualFaces3(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const; |
|
void CreateVirtualFaces4(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, Mesh::FaceIdxArr& mapFaceToVirtualFace, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f); |
|
void CreateVirtualFaces5(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const; |
|
bool CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, std::vector<bool>& isVirtualFace, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const; |
|
bool CreateVirtualFaces7(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, std::vector<bool>& isVirtualFace, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const; |
|
IIndexArr SelectBestViews(const FaceDataArr& faceDatas, FIndex fid, unsigned minCommonCameras, float ratioAngleToQuality) const; |
|
IIndexArr SelectBestView(const FaceDataArr& faceDatas, FIndex fid, unsigned minCommonCameras, float ratioAngleToQuality) const; |
|
|
|
bool FaceViewSelection(unsigned minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, int nIgnoreMaskLabel, const IIndexArr& views); |
|
bool FaceViewSelection2(unsigned minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, int nIgnoreMaskLabel, const IIndexArr& views); |
|
bool FaceViewSelection3(unsigned minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, int nIgnoreMaskLabel, const IIndexArr& views); |
|
bool FaceViewSelection4( unsigned minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, int nIgnoreMaskLabel, const IIndexArr& views, const Mesh::FaceIdxArr* faceIndices = nullptr); |
|
void CreateAdaptiveVirtualFaces(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras); |
|
bool ShouldMergeVirtualFace(const MeshTexture::FaceDataViewArr& facesDatas, const Mesh::FaceIdxArr& currentVirtualFace, FIndex candidateFace, unsigned minCommonCameras); |
|
void CreateSeamVertices(); |
|
void GlobalSeamLeveling(); |
|
void GlobalSeamLeveling3(); |
|
void LocalSeamLeveling(); |
|
void LocalSeamLeveling3(); |
|
void GenerateTexture(bool bGlobalSeamLeveling, bool bLocalSeamLeveling, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, float fSharpnessWeight, int maxTextureSize, const SEACAVE::String& baseFileName, bool bOriginFaceview); |
|
void GenerateTexture2(bool bGlobalSeamLeveling, bool bLocalSeamLeveling, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, float fSharpnessWeight, int maxTextureSize, const SEACAVE::String& baseFileName); |
|
|
|
// Bruce |
|
//* |
|
template <typename PIXEL> |
|
static inline PIXEL RGB2YCBCR(const PIXEL& v) { |
|
typedef typename PIXEL::Type T; |
|
return PIXEL( |
|
v[0] * T(0.299) + v[1] * T(0.587) + v[2] * T(0.114), |
|
v[0] * T(-0.168736) + v[1] * T(-0.331264) + v[2] * T(0.5) + T(128), |
|
v[0] * T(0.5) + v[1] * T(-0.418688) + v[2] * T(-0.081312) + T(128) |
|
); |
|
} |
|
template <typename PIXEL> |
|
static inline PIXEL YCBCR2RGB(const PIXEL& v) { |
|
typedef typename PIXEL::Type T; |
|
const T v1(v[1] - T(128)); |
|
const T v2(v[2] - T(128)); |
|
return PIXEL( |
|
v[0] + v2 * T(1.402), |
|
v[0] + v1 * T(-0.34414) + v2 * T(-0.71414), |
|
v[0] + v1 * T(1.772) |
|
); |
|
} |
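
// The two conversions above use the full-range BT.601 (JPEG-style) coefficients: Y in [0,255]
// and Cb/Cr centered at 128; YCBCR2RGB is the matching inverse, so a round trip reproduces the
// input up to floating-point error. A small example that can be checked by hand:
//   RGB2YCBCR(Color(128, 128, 128)) == Color(128, 128, 128)  // neutral gray: Y=128, Cb=Cr=128
//   YCBCR2RGB(Color(128, 128, 128)) == Color(128, 128, 128)
// Note that neither function clamps, so out-of-range inputs can produce values outside [0,255].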
|
|
|
static inline float GetLuminance(const Color& rgb) {
	Color ycbcr = MeshTexture::RGB2YCBCR(rgb);
	return ycbcr[0]; // the Y component is the luminance
}
|
|
|
// small struct bundling the median color and the median quality of a face
struct MedianValues {
	Color color;
	float quality;
};

// compute the per-channel median color and the median quality over all views seeing a face
static MedianValues ComputeMedianColorAndQuality(const std::vector<std::pair<float, Color>>& views) {
	std::vector<Color> colors;
	std::vector<float> qualities; // quality values, one per view

	for (const auto& view : views) {
		qualities.push_back(view.first);  // collect the quality
		colors.push_back(view.second);    // collect the color
	}

	// sort each color channel and the qualities independently
	std::vector<float> r, g, b;
	for (const auto& color : colors) {
		r.push_back(color[0]);
		g.push_back(color[1]);
		b.push_back(color[2]);
	}

	std::sort(r.begin(), r.end());
	std::sort(g.begin(), g.end());
	std::sort(b.begin(), b.end());
	std::sort(qualities.begin(), qualities.end());

	const int mid = colors.size() / 2;

	MedianValues result;
	result.color = Color(r[mid], g[mid], b[mid]); // per-channel median color
	result.quality = qualities[mid];              // median quality

	return result;
}
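
// Usage note: the median above is taken independently per channel, so the returned color need not
// equal any input color; it only serves as a robust reference when rejecting outlier views. A
// hypothetical call with three views:
//   std::vector<std::pair<float, Color>> views = {
//       {0.8f, Color(200, 10, 10)}, {0.5f, Color(190, 20, 15)}, {0.1f, Color(40, 40, 200)}};
//   MedianValues m = ComputeMedianColorAndQuality(views);  // m.color == (190,20,15), m.quality == 0.5
// The function assumes a non-empty input; for an even number of views it picks the upper of the
// two middle elements.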
|
|
|
// compute the median luminance over all views
|
static float ComputeMedianLuminance(const std::vector<std::pair<float, Color>>& views) { |
|
std::vector<float> luminances; |
|
for (const auto& view : views) { |
|
luminances.push_back(MeshTexture::GetLuminance(view.second)); |
|
} |
|
|
|
std::sort(luminances.begin(), luminances.end()); |
|
return luminances[luminances.size() / 2]; |
|
} |
|
|
|
// compute the median absolute deviation (MAD) of the colors from the given median
|
static float ComputeColorMAD(const std::vector<std::pair<float, Color>>& views, const Color& median) { |
|
std::vector<float> distances; |
|
for (const auto& view : views) { |
|
distances.push_back(cv::norm(view.second - median)); |
|
} |
|
|
|
std::sort(distances.begin(), distances.end()); |
|
return distances[distances.size() / 2]; |
|
} |
|
|
|
// compute the median absolute deviation (MAD) of the luminance values
|
static float ComputeLuminanceMAD(const std::vector<std::pair<float, Color>>& views, float medianLuminance) { |
|
std::vector<float> distances; |
|
for (const auto& view : views) { |
|
distances.push_back(std::abs(MeshTexture::GetLuminance(view.second) - medianLuminance)); |
|
} |
|
|
|
std::sort(distances.begin(), distances.end()); |
|
return distances[distances.size() / 2]; |
|
} |
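
// Note: the MAD helpers above provide a robust spread estimate that outlier tests can use instead
// of a standard deviation; for normally distributed data, sigma is approximately 1.4826 * MAD.
// Illustrative use only (not the exact rule implemented by FaceOutlierDetection):
//   const float mad = ComputeColorMAD(views, median.color);
//   const bool isOutlier = cv::norm(candidateColor - median.color) > 3.f * 1.4826f * mad;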
|
|
|
//*/ |
|
/* |
|
// uses ITU-R BT.601 studio-range coefficients, with clamping for numerical stability
|
template <typename PIXEL> |
|
static inline PIXEL RGB2YCBCR(const PIXEL& v) { |
|
typedef typename PIXEL::Type T; |
|
const T y = 0.257f * v[0] + 0.504f * v[1] + 0.098f * v[2] + 16.0f; |
|
const T cb = -0.148f * v[0] - 0.291f * v[1] + 0.439f * v[2] + 128.0f; |
|
const T cr = 0.439f * v[0] - 0.368f * v[1] - 0.071f * v[2] + 128.0f; |
|
return PIXEL( |
|
std::clamp(y, T(16), T(235)), |
|
std::clamp(cb, T(16), T(240)), |
|
std::clamp(cr, T(16), T(240)) |
|
); |
|
} |
|
|
|
template <typename PIXEL> |
|
static inline PIXEL YCBCR2RGB(const PIXEL& v) { |
|
typedef typename PIXEL::Type T; |
|
const T y = std::max(v[0] - T(16), T(0)); |
|
const T cb = v[1] - T(128); |
|
const T cr = v[2] - T(128); |
|
|
|
const T r = 1.164f * y + 1.596f * cr; |
|
const T g = 1.164f * y - 0.392f * cb - 0.813f * cr; |
|
const T b = 1.164f * y + 2.017f * cb; |
|
|
|
return PIXEL( |
|
std::clamp(r, T(0), T(255)), |
|
std::clamp(g, T(0), T(255)), |
|
std::clamp(b, T(0), T(255)) |
|
); |
|
} |
|
*/ |
|
|
|
/* |
|
// standard conversion functions for the MeshTexture class (OpenCV-based alternative)
|
template <typename PIXEL> |
|
static inline PIXEL RGB2YCBCR(const PIXEL& v) { |
|
cv::Mat src(1, 1, CV_32FC3, const_cast<float*>(v.ptr())); |
|
cv::Mat dst; |
|
cv::cvtColor(src, dst, cv::COLOR_RGB2YCrCb); // use OpenCV's standard conversion
|
return PIXEL(dst.at<cv::Vec3f>(0)); |
|
} |
|
|
|
template <typename PIXEL> |
|
static inline PIXEL YCBCR2RGB(const PIXEL& v) { |
|
cv::Mat src(1, 1, CV_32FC3, const_cast<float*>(v.ptr())); |
|
cv::Mat dst; |
|
cv::cvtColor(src, dst, cv::COLOR_YCrCb2RGB); // use OpenCV's standard conversion
|
return PIXEL(dst.at<cv::Vec3f>(0)); |
|
} |
|
//*/ |
|
|
|
// Mesh::FaceIdxArr m_mapFaceToVirtualFace; |
|
|
|
protected: |
|
static void ProcessMask(Image8U& mask, int stripWidth); |
|
static void PoissonBlending(const Image32F3& src, Image32F3& dst, const Image8U& mask, float bias=1.f); |
|
|
|
|
|
public: |
|
const unsigned nResolutionLevel; // how many times to scale down the images before mesh optimization |
|
const unsigned nMinResolution; // do not scale the images below this resolution
|
|
|
// store found texture patches |
|
TexturePatchArr texturePatches; |
|
LabelArr labelsInvalid; |
|
|
|
// used to compute the seam leveling |
|
PairIdxArr seamEdges; // the (face-face) edges connecting different texture patches |
|
Mesh::FaceIdxArr components; // for each face, the index of the texture patch it belongs to
|
IndexArr mapIdxPatch; // remap texture patch indices after invalid patches removal |
|
SeamVertices seamVertices; // array of vertices on the border between two or more patches |
|
|
|
// valid the entire time |
|
Mesh::VertexFacesArr& vertexFaces; // for each vertex, the list of faces containing it |
|
BoolArr& vertexBoundary; // for each vertex, stores if it is at the boundary or not |
|
Mesh::FaceFacesArr& faceFaces; // for each face, the list of adjacent faces, NO_ID for border edges (optional) |
|
Mesh::TexCoordArr& faceTexcoords; // for each face, the texture-coordinates of the vertices |
|
Mesh::TexIndexArr& faceTexindices; // for each face, the index of the texture it belongs to
|
Mesh::Image8U3Arr& texturesDiffuse; // texture containing the diffuse color |
|
|
|
// constant the entire time |
|
Mesh::VertexArr& vertices; |
|
Mesh::FaceArr& faces; |
|
ImageArr& images; |
|
|
|
Scene& scene; // the scene containing the mesh and the images
|
}; |
|
|
|
// create a mask for the given image marking the invalid pixels generated
// while correcting the image for lens distortion;
// the returned mask has the same size as the image and is set to zero for invalid pixels
|
static Image8U DetectInvalidImageRegions(const Image8U3& image) |
|
{ |
|
const cv::Scalar upDiff(3); |
|
const int flags(8 | (255 << 8)); |
|
Image8U mask(image.rows + 2, image.cols + 2); |
|
mask.memset(0); |
|
Image8U imageGray; |
|
cv::cvtColor(image, imageGray, cv::COLOR_BGR2GRAY); |
|
if (imageGray(0, 0) == 0) |
|
cv::floodFill(imageGray, mask, cv::Point(0, 0), 255, NULL, cv::Scalar(0), upDiff, flags); |
|
if (imageGray(image.rows / 2, 0) == 0) |
|
cv::floodFill(imageGray, mask, cv::Point(0, image.rows / 2), 255, NULL, cv::Scalar(0), upDiff, flags); |
|
if (imageGray(image.rows - 1, 0) == 0) |
|
cv::floodFill(imageGray, mask, cv::Point(0, image.rows - 1), 255, NULL, cv::Scalar(0), upDiff, flags); |
|
if (imageGray(image.rows - 1, image.cols / 2) == 0) |
|
cv::floodFill(imageGray, mask, cv::Point(image.cols / 2, image.rows - 1), 255, NULL, cv::Scalar(0), upDiff, flags); |
|
if (imageGray(image.rows - 1, image.cols - 1) == 0) |
|
cv::floodFill(imageGray, mask, cv::Point(image.cols - 1, image.rows - 1), 255, NULL, cv::Scalar(0), upDiff, flags); |
|
if (imageGray(image.rows / 2, image.cols - 1) == 0) |
|
cv::floodFill(imageGray, mask, cv::Point(image.cols - 1, image.rows / 2), 255, NULL, cv::Scalar(0), upDiff, flags); |
|
if (imageGray(0, image.cols - 1) == 0) |
|
cv::floodFill(imageGray, mask, cv::Point(image.cols - 1, 0), 255, NULL, cv::Scalar(0), upDiff, flags); |
|
if (imageGray(0, image.cols / 2) == 0) |
|
cv::floodFill(imageGray, mask, cv::Point(image.cols / 2, 0), 255, NULL, cv::Scalar(0), upDiff, flags); |
|
mask = (mask(cv::Rect(1,1, imageGray.cols,imageGray.rows)) == 0); |
|
return mask; |
|
} |
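
// Implementation note: the detector assumes the undistortion artifacts are black regions touching
// the image border, so it seeds a flood fill at the four corners and four edge midpoints of the
// grayscale image wherever those seeds are black; everything reached by the fill is reported as
// invalid (0 in the returned mask) and the rest stays 255. Typical use, mirroring the calls later
// in this file:
//   const Image8U validMask = DetectInvalidImageRegions(imageData.image);
//   // validMask(y, x) == 0  ->  pixel comes from undistortion padding and should be ignored
// Images whose borders are legitimately black may therefore lose some valid border pixels.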
|
|
|
MeshTexture::MeshTexture(Scene& _scene, unsigned _nResolutionLevel, unsigned _nMinResolution) |
|
: |
|
nResolutionLevel(_nResolutionLevel), |
|
nMinResolution(_nMinResolution), |
|
vertexFaces(_scene.mesh.vertexFaces), |
|
vertexBoundary(_scene.mesh.vertexBoundary), |
|
faceFaces(_scene.mesh.faceFaces), |
|
faceTexcoords(_scene.mesh.faceTexcoords), |
|
faceTexindices(_scene.mesh.faceTexindices), |
|
texturesDiffuse(_scene.mesh.texturesDiffuse), |
|
vertices(_scene.mesh.vertices), |
|
faces(_scene.mesh.faces), |
|
images(_scene.images), |
|
scene(_scene), |
|
alternativeTexture(nullptr) |
|
{ |
|
} |
|
MeshTexture::~MeshTexture() |
|
{ |
|
vertexFaces.Release(); |
|
vertexBoundary.Release(); |
|
faceFaces.Release(); |
|
} |
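
// Note: the destructor releases only the derived adjacency data (vertexFaces, vertexBoundary,
// faceFaces) that ListVertexFaces() fills in for the duration of texturing; the mesh geometry,
// texture coordinates and generated texture atlases live in the Scene and are left untouched.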
|
|
|
void MeshTexture::ComputeFaceCurvatures() const {
	if (scene.mesh.vertices.empty() || scene.mesh.faces.empty())
		return; // nothing to do on an empty mesh

	const Mesh& mesh = scene.mesh;
	meshCurvatures.resize(mesh.faces.size());

	// 1. compute a per-vertex curvature estimate
	std::vector<float> vertexCurvatures(mesh.vertices.size(), 0.0f);
	FOREACH(idxVert, mesh.vertices) {
		if (idxVert >= mesh.vertexFaces.size() || idxVert >= mesh.vertexNormals.size())
			continue;

		const Normal& normalCenter = mesh.vertexNormals[idxVert];
		float sumAngle = 0.0f;
		int count = 0;

		// iterate over the faces incident to this vertex
		const Mesh::FaceIdxArr& vf = mesh.vertexFaces[idxVert];
		for (FIndex adjFace : vf) {
			const Normal& adjNormal = mesh.faceNormals[adjFace];
			sumAngle += ComputeAngleN(normalCenter.ptr(), adjNormal.ptr());
			++count;
		}

		// curvature proxy: mean deviation between the vertex normal and the incident face normals
		vertexCurvatures[idxVert] = (count > 1) ? sumAngle / count : 0.0f;
	}

	// 2. convert to per-face curvature by averaging the three vertex curvatures
	FOREACH(idxFace, mesh.faces) {
		const Mesh::Face& f = mesh.faces[idxFace];
		float curvature = 0.0f;
		for (int i = 0; i < 3; ++i) {
			curvature += vertexCurvatures[f[i]];
		}
		meshCurvatures[idxFace] = curvature / 3.0f;
	}
}
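
// Note: this is a cheap curvature proxy, namely the average angle between a vertex normal and its
// incident face normals spread onto the faces, not a discrete mean or Gaussian curvature. It
// expects mesh.vertexNormals, mesh.faceNormals and mesh.vertexFaces to be available beforehand
// (ListVertexFaces() below fills the adjacency part), and it only needs to be monotone in how
// curved the surface is, which is all the texturing heuristics require.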
|
|
|
// extract array of triangles incident to each vertex |
|
// and check each vertex if it is at the boundary or not |
|
void MeshTexture::ListVertexFaces() |
|
{ |
|
scene.mesh.EmptyExtra(); |
|
scene.mesh.ListIncidenteFaces(); |
|
scene.mesh.ListBoundaryVertices(); |
|
scene.mesh.ListIncidenteFaceFaces(); |
|
} |
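
// Note: the three calls above populate, respectively, the per-vertex list of incident faces
// (vertexFaces), the per-vertex boundary flags (vertexBoundary) and the per-face list of adjacent
// faces (faceFaces); these are exactly the members this class borrows by reference in its
// constructor and releases again in its destructor.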
|
|
|
// extract array of faces viewed by each image |
|
bool MeshTexture::ListCameraFaces(FaceDataViewArr& facesDatas, float fOutlierThreshold, int nIgnoreMaskLabel, const IIndexArr& _views, bool bUseVirtualFaces) |
|
{ |
|
// create faces octree |
|
Mesh::Octree octree; |
|
Mesh::FacesInserter::CreateOctree(octree, scene.mesh); |
|
|
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp critical(invalid_faces_access) |
|
#endif |
|
{ |
|
// scene.mesh.invalidFaces.clear(); |
|
} |
|
|
|
// extract array of faces viewed by each image |
|
IIndexArr views(_views); |
|
if (views.empty()) { |
|
views.resize(images.size()); |
|
std::iota(views.begin(), views.end(), IIndex(0)); |
|
} |
|
facesDatas.resize(faces.size()); |
|
// viewDepthMaps.resize(views.size()); // allocate per-view depth map storage (disabled)
|
Util::Progress progress(_T("Initialized views"), views.size()); |
|
typedef float real; |
|
TImage<real> imageGradMag; |
|
TImage<real>::EMat mGrad[2]; |
|
FaceMap faceMap; |
|
DepthMap depthMap; |
|
#ifdef TEXOPT_USE_OPENMP |
|
bool bAbort(false); |
|
#pragma omp parallel for private(imageGradMag, mGrad, faceMap, depthMap) |
|
for (int_t idx=0; idx<(int_t)views.size(); ++idx) { |
|
|
|
#pragma omp flush (bAbort) |
|
if (bAbort) { |
|
++progress; |
|
continue; |
|
} |
|
const IIndex idxView(views[(IIndex)idx]); |
|
#else |
|
for (IIndex idxView: views) { |
|
#endif |
|
Image& imageData = images[idxView]; |
|
if (!imageData.IsValid()) { |
|
++progress; |
|
continue; |
|
} |
|
|
|
std::string strPath = imageData.name;
size_t lastSlash = strPath.find_last_of("/\\");
if (lastSlash == std::string::npos) lastSlash = 0; // no path separator: start from the beginning
else lastSlash++; // skip the separator

// locate the extension separator '.'
size_t lastDot = strPath.find_last_of('.');
if (lastDot == std::string::npos) lastDot = strPath.size(); // no extension: take everything to the end

// extract the bare file name (no path, no extension)
std::string strName = strPath.substr(lastSlash, lastDot - lastSlash);
|
|
|
/* |
|
// if (strName!="74_8" && strName!="13_8" && strName!="61_8" && |
|
// strName!="92_8" && strName!="101_8" && strName!="102_8" && |
|
// strName!="103_8" && strName!="112_8" && strName!="113_8" && |
|
// strName!="122_8" && strName!="123_8" && strName!="132_8") |
|
// if (strName!="74_8" && strName!="13_8" && strName!="61_8" && |
|
// strName!="92_8" && strName!="101_8" && strName!="102_8" && |
|
// strName!="103_8" && strName!="112_8" && strName!="113_8" && |
|
// strName!="122_2" && strName!="123_2" && strName!="121_2") |
|
// if (strName!="122_2") |
|
if (strName!="122_2" && strName!="123_2" && strName!="121_2") |
|
{ |
|
continue; |
|
} |
|
//*/ |
|
|
|
// load image |
|
unsigned level(nResolutionLevel); |
|
const unsigned imageSize(imageData.RecomputeMaxResolution(level, nMinResolution)); |
|
if ((imageData.image.empty() || MAXF(imageData.width,imageData.height) != imageSize) && !imageData.ReloadImage(imageSize)) { |
|
#ifdef TEXOPT_USE_OPENMP |
|
bAbort = true; |
|
#pragma omp flush (bAbort) |
|
continue; |
|
#else |
|
return false; |
|
#endif |
|
} |
|
imageData.UpdateCamera(scene.platforms); |
|
// compute gradient magnitude |
|
imageData.image.toGray(imageGradMag, cv::COLOR_BGR2GRAY, true); |
|
cv::Mat grad[2]; |
|
mGrad[0].resize(imageGradMag.rows, imageGradMag.cols); |
|
grad[0] = cv::Mat(imageGradMag.rows, imageGradMag.cols, cv::DataType<real>::type, (void*)mGrad[0].data()); |
|
mGrad[1].resize(imageGradMag.rows, imageGradMag.cols); |
|
grad[1] = cv::Mat(imageGradMag.rows, imageGradMag.cols, cv::DataType<real>::type, (void*)mGrad[1].data()); |
|
#if 1 |
|
cv::Sobel(imageGradMag, grad[0], cv::DataType<real>::type, 1, 0, 3, 1.0/8.0); |
|
cv::Sobel(imageGradMag, grad[1], cv::DataType<real>::type, 0, 1, 3, 1.0/8.0); |
|
#elif 1 |
|
const TMatrix<real,3,5> kernel(CreateDerivativeKernel3x5()); |
|
cv::filter2D(imageGradMag, grad[0], cv::DataType<real>::type, kernel); |
|
cv::filter2D(imageGradMag, grad[1], cv::DataType<real>::type, kernel.t()); |
|
#else |
|
const TMatrix<real,5,7> kernel(CreateDerivativeKernel5x7()); |
|
cv::filter2D(imageGradMag, grad[0], cv::DataType<real>::type, kernel); |
|
cv::filter2D(imageGradMag, grad[1], cv::DataType<real>::type, kernel.t()); |
|
#endif |
|
(TImage<real>::EMatMap)imageGradMag = (mGrad[0].cwiseAbs2()+mGrad[1].cwiseAbs2()).cwiseSqrt(); |
|
// apply some blur on the gradient to lower noise/glossiness effects onto face-quality score |
|
cv::GaussianBlur(imageGradMag, imageGradMag, cv::Size(15, 15), 0, 0, cv::BORDER_DEFAULT); |
|
// select faces inside view frustum |
|
Mesh::FaceIdxArr cameraFaces; |
|
Mesh::FacesInserter inserter(cameraFaces); |
|
const TFrustum<float,5> frustum(Matrix3x4f(imageData.camera.P), (float)imageData.width, (float)imageData.height); |
|
octree.Traverse(frustum, inserter); |
|
// project all triangles in this view and keep the closest ones |
|
faceMap.create(imageData.GetSize()); |
|
depthMap.create(imageData.GetSize()); |
|
|
|
std::unordered_set<FIndex> tempFaces; |
|
if (false) |
|
{ |
|
// viewDepthMaps[idxView] = depthMap; |
|
|
|
for (int r = 0; r < depthMap.rows; ++r) { |
|
for (int c = 0; c < depthMap.cols; ++c) { |
|
// printf("1depthMap(r, c)=%f\n", depthMap(r, c)); |
|
} |
|
} |
|
|
|
RasterMesh::Triangle triangle; |
|
RasterMesh rasterer1(*this, vertices, imageData.camera, depthMap, faceMap, scene.mesh, false); |
|
|
|
RasterMesh::TriangleRasterizer triangleRasterizer(triangle, rasterer1); |
|
if (nIgnoreMaskLabel >= 0) { |
|
// import mask |
|
BitMatrix bmask; |
|
// std::cout << "nIgnoreMaskLabel is open" << std::endl; |
|
DepthEstimator::ImportIgnoreMask(imageData, imageData.GetSize(), (uint16_t)OPTDENSE::nIgnoreMaskLabel, bmask, &rasterer1.mask); |
|
} else if (nIgnoreMaskLabel == -1) { |
|
// creating mask to discard invalid regions created during image radial undistortion |
|
rasterer1.mask = DetectInvalidImageRegions(imageData.image); |
|
#if TD_VERBOSE != TD_VERBOSE_OFF |
|
if (VERBOSITY_LEVEL > 2) |
|
cv::imwrite(String::FormatString("umask%04d.png", idxView), rasterer1.mask); |
|
#endif |
|
} |
|
|
|
rasterer1.Clear(); |
|
|
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp critical(invalid_faces_access) |
|
#endif |
|
{ |
|
std::lock_guard<std::mutex> lock(*scene.mesh.invalidFaces.mtx); |
|
scene.mesh.invalidFaces.data.clear(); |
|
} |
|
printf("imageData.name=%s\n", imageData.name.c_str()); |
|
for (FIndex idxFace : cameraFaces) { |
|
if (idxFace >= faces.size()) { |
|
DEBUG_EXTRA("Invalid face index %u (max %u) in view %u", idxFace, faces.size()-1, idxView); |
|
continue; |
|
} |
|
|
|
// // protect access with a critical section
|
// bool skipFace = false; |
|
// #ifdef TEXOPT_USE_OPENMP |
|
// #pragma omp critical(invalid_faces_access) |
|
// #endif |
|
// { |
|
// std::lock_guard<std::mutex> lock(scene.mesh.invalidFacesMutex); |
|
// skipFace = (scene.mesh.invalidFaces.find(idxFace) != scene.mesh.invalidFaces.end()); |
|
// } |
|
|
|
// if (skipFace) |
|
// { |
|
// continue; |
|
// } |
|
|
|
rasterer1.validFace = true; |
|
const Face& facet = faces[idxFace]; |
|
rasterer1.idxFace = idxFace; |
|
|
|
if (scene.is_face_visible(strName.c_str(), idxFace)) |
|
{ |
|
rasterer1.Project(facet, triangleRasterizer); |
|
if (!rasterer1.validFace) |
|
rasterer1.Project(facet, triangleRasterizer); |
|
} |
|
} |
|
|
|
// if (!bUseVirtualFaces) |
|
// if (bUseVirtualFaces) |
|
tempFaces = PerformLocalDepthConsistencyCheck(depthMap, faceMap, scene.mesh, idxView, strName); |
|
|
|
// RasterMesh rasterer2(*this, vertices, imageData.camera, depthMap, faceMap, scene.mesh, true); |
|
RasterMesh rasterer2(*this, vertices, imageData.camera, depthMap, faceMap, std::ref(scene.mesh), true); |
|
|
|
for (int r = 0; r < depthMap.rows; ++r) { |
|
for (int c = 0; c < depthMap.cols; ++c) { |
|
// if (depthMap(r, c)> 999.0f) |
|
// printf("2depthMap(r, c)=%f\n", depthMap(r, c)); |
|
} |
|
} |
|
|
|
RasterMesh::TriangleRasterizer triangleRasterizer2(triangle, rasterer2); |
|
if (nIgnoreMaskLabel >= 0) { |
|
// import mask |
|
BitMatrix bmask; |
|
// std::cout << "nIgnoreMaskLabel is open" << std::endl; |
|
DepthEstimator::ImportIgnoreMask(imageData, imageData.GetSize(), (uint16_t)OPTDENSE::nIgnoreMaskLabel, bmask, &rasterer2.mask); |
|
} else if (nIgnoreMaskLabel == -1) { |
|
// creating mask to discard invalid regions created during image radial undistortion |
|
rasterer2.mask = DetectInvalidImageRegions(imageData.image); |
|
#if TD_VERBOSE != TD_VERBOSE_OFF |
|
if (VERBOSITY_LEVEL > 2) |
|
cv::imwrite(String::FormatString("umask%04d.png", idxView), rasterer2.mask); |
|
#endif |
|
} |
|
|
|
for (int r = 0; r < depthMap.rows; ++r) { |
|
for (int c = 0; c < depthMap.cols; ++c) { |
|
// if (depthMap(r, c)> 999.0f) |
|
// printf("3depthMap(r, c)=%f, r=%d, c=%d, faceMap(r, c)=%d\n", depthMap(r, c), r, c, faceMap(r, c)); |
|
} |
|
} |
|
// rasterer2.Clear(); |
|
|
|
for (FIndex idxFace : cameraFaces) { |
|
// validate the face index before using it
|
if (idxFace >= faces.size()) { |
|
DEBUG_EXTRA("Invalid face index %u (max %u) in view %u", idxFace, faces.size()-1, idxView); |
|
continue; |
|
} |
|
|
|
// protect the shared invalid-faces set with a critical section
|
bool skipFace = false; |
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp critical(invalid_faces_access) |
|
#endif |
|
{ |
|
std::lock_guard<std::mutex> lock(*scene.mesh.invalidFaces.mtx); |
|
skipFace = (scene.mesh.invalidFaces.data.find(idxFace) != scene.mesh.invalidFaces.data.end()); |
|
} |
|
|
|
if (skipFace) |
|
{ |
|
continue; |
|
} |
|
|
|
if (tempFaces.find(idxFace) == tempFaces.end())
	continue;
|
|
|
rasterer2.validFace = true; |
|
const Face& facet = faces[idxFace]; |
|
rasterer2.idxFace = idxFace; |
|
// rasterer2.Project(facet, triangleRasterizer2); |
|
// if (!rasterer2.validFace) |
|
// rasterer2.Project(facet, triangleRasterizer2); |
|
} |
|
|
|
for (int r = 0; r < depthMap.rows; ++r) { |
|
for (int c = 0; c < depthMap.cols; ++c) { |
|
// if (depthMap(r, c)> 999.0f) |
|
// printf("4depthMap(r, c)=%f, r=%d, c=%d, faceMap(r, c)=%d\n", depthMap(r, c), r, c, faceMap(r, c)); |
|
} |
|
} |
|
} |
|
else |
|
{ |
|
// RasterMesh rasterer(vertices, imageData.camera, depthMap, faceMap); |
|
RasterMesh rasterer(*this, vertices, imageData.camera, depthMap, faceMap, scene.mesh, false); |
|
RasterMesh::Triangle triangle; |
|
RasterMesh::TriangleRasterizer triangleRasterizer(triangle, rasterer); |
|
if (nIgnoreMaskLabel >= 0) { |
|
// import mask |
|
BitMatrix bmask; |
|
// std::cout << "nIgnoreMaskLabel is open" << std::endl; |
|
DepthEstimator::ImportIgnoreMask(imageData, imageData.GetSize(), (uint16_t)OPTDENSE::nIgnoreMaskLabel, bmask, &rasterer.mask); |
|
} else if (nIgnoreMaskLabel == -1) { |
|
// creating mask to discard invalid regions created during image radial undistortion |
|
rasterer.mask = DetectInvalidImageRegions(imageData.image); |
|
#if TD_VERBOSE != TD_VERBOSE_OFF |
|
if (VERBOSITY_LEVEL > 2) |
|
cv::imwrite(String::FormatString("umask%04d.png", idxView), rasterer.mask); |
|
#endif |
|
} |
|
rasterer.Clear(); |
|
for (FIndex idxFace : cameraFaces) { |
|
rasterer.validFace = true; |
|
const Face& facet = faces[idxFace]; |
|
rasterer.idxFace = idxFace; |
|
|
|
if (scene.is_face_visible(strName.c_str(), idxFace)) |
|
{ |
|
rasterer.Project(facet, triangleRasterizer); |
|
if (!rasterer.validFace) |
|
rasterer.Project(facet, triangleRasterizer); |
|
} |
|
} |
|
|
|
// if (!bUseVirtualFaces) |
|
// if (bUseVirtualFaces) |
|
tempFaces = PerformLocalDepthConsistencyCheck(depthMap, faceMap, scene.mesh, idxView, strName); |
|
} |
|
|
|
// compute the projection area of visible faces |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
CLISTDEF0IDX(uint32_t,FIndex) areas(faces.size()); |
|
areas.Memset(0); |
|
#endif |
|
|
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp critical |
|
#endif |
|
|
|
{ |
|
// faceQuality is influenced by : |
|
// + area: the higher the area the more gradient scores will be added to the face quality |
|
// + sharpness: sharper image or image resolution or how close is to the face will result in higher gradient on the same face |
|
// ON GLOSS IMAGES it happens to have a high volatile sharpness depending on how the light reflects under different angles |
|
// + angle: low angle increases the surface area |
|
for (int j=0; j<faceMap.rows; ++j) { |
|
for (int i=0; i<faceMap.cols; ++i) { |
|
const FIndex& idxFace = faceMap(j,i); |
|
// ASSERT((idxFace == NO_ID && depthMap(j,i) == 0) || (idxFace != NO_ID && depthMap(j,i) > 0)); |
|
|
|
if (idxFace == NO_ID) |
|
continue; |
|
|
|
FaceDataArr& faceDatas = facesDatas[idxFace]; |
|
|
|
const Pixel8U& pixel = imageData.image(j, i); |
|
// assuming an 8-bit image, treat a pixel as overexposed if any RGB channel exceeds 250
|
if (pixel.r > 250 || pixel.g > 250 || pixel.b > 250) { |
|
// continue; |
|
} |
|
|
|
// if (!(scene.mesh.invalidFacesRelative.data.contains(idxFace) && scene.is_face_visible_relative(idxFace))) |
|
// if (false) |
|
{ |
|
if (depthMap(j,i)>999.0f) |
|
{ |
|
/* |
|
// continue; |
|
// printf("idxFace=%d, depthMap(j,i=%f\n", idxFace, depthMap(j,i)); |
|
FaceData& faceData = faceDatas.emplace_back(); |
|
faceData.idxView = idxView; |
|
faceData.quality = imageGradMag(j,i); |
|
faceData.bInvalidFacesRelative = true; |
|
// printf("faceData.quality=%f\n", faceData.quality); |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
faceData.color = imageData.image(j,i); |
|
#endif |
|
continue; |
|
*/ |
|
|
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
uint32_t& area = areas[idxFace]; |
|
if (area++ == 0) { |
|
#else |
|
if (faceDatas.empty() || faceDatas.back().idxView != idxView) { |
|
#endif |
|
// create new face-data |
|
FaceData& faceData = faceDatas.emplace_back(); |
|
faceData.idxView = idxView; |
|
faceData.quality = imageGradMag(j,i); |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
faceData.color = imageData.image(j,i); |
|
#endif |
|
faceData.bInvalidFacesRelative = true; |
|
} else { |
|
// update face-data |
|
ASSERT(!faceDatas.empty()); |
|
FaceData& faceData = faceDatas.back(); |
|
ASSERT(faceData.idxView == idxView); |
|
faceData.quality = imageGradMag(j,i); |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
faceData.color = Color(imageData.image(j,i)); |
|
#endif |
|
faceData.bInvalidFacesRelative = true; |
|
} |
|
continue; |
|
} |
|
} |
|
|
|
// if (tempFaces.find(idxFace) == tempFaces.end()) |
|
// continue; |
|
|
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
uint32_t& area = areas[idxFace]; |
|
if (area++ == 0) { |
|
#else |
|
if (faceDatas.empty() || faceDatas.back().idxView != idxView) { |
|
#endif |
|
// create new face-data |
|
FaceData& faceData = faceDatas.emplace_back(); |
|
faceData.idxView = idxView; |
|
faceData.quality = imageGradMag(j,i); |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
faceData.color = imageData.image(j,i); |
|
#endif |
|
if (depthMap(j,i)>999.0f) |
|
faceData.bInvalidFacesRelative = true; |
|
} else { |
|
// update face-data |
|
ASSERT(!faceDatas.empty()); |
|
FaceData& faceData = faceDatas.back(); |
|
ASSERT(faceData.idxView == idxView); |
|
faceData.quality += imageGradMag(j,i); |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
faceData.color += Color(imageData.image(j,i)); |
|
#endif |
|
if (depthMap(j,i)>999.0f) |
|
faceData.bInvalidFacesRelative = true; |
|
} |
|
} |
|
} |
|
// adjust face quality with camera angle relative to face normal |
|
// tries to increase chances of a camera with perpendicular view on the surface (smoothened normals) to be selected |
|
FOREACH(idxFace, facesDatas) { |
|
FaceDataArr& faceDatas = facesDatas[idxFace]; |
|
if (faceDatas.empty() || faceDatas.back().idxView != idxView) |
|
continue; |
|
const Face& f = faces[idxFace]; |
|
const Vertex faceCenter((vertices[f[0]] + vertices[f[1]] + vertices[f[2]]) / 3.f); |
|
const Point3f camDir(Cast<Mesh::Type>(imageData.camera.C) - faceCenter); |
|
const Normal& faceNormal = scene.mesh.faceNormals[idxFace]; |
|
const float cosFaceCam(MAXF(0.001f, ComputeAngle(camDir.ptr(), faceNormal.ptr()))); |
|
faceDatas.back().quality *= SQUARE(cosFaceCam); |
|
} |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
FOREACH(idxFace, areas) { |
|
const uint32_t& area = areas[idxFace]; |
|
if (area > 0) { |
|
Color& color = facesDatas[idxFace].back().color; |
|
color = RGB2YCBCR(Color(color * (1.f/(float)area))); |
|
} |
|
} |
|
#endif |
|
} |
|
++progress; |
|
} |
|
#ifdef TEXOPT_USE_OPENMP |
|
if (bAbort) |
|
return false; |
|
#endif |
|
progress.close(); |
|
|
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
if (fOutlierThreshold > 0) { |
|
// try to detect outlier views for each face |
|
// (views for which the face is occluded by a dynamic object in the scene, ex. pedestrians) |
|
for (FaceDataArr& faceDatas: facesDatas) |
|
FaceOutlierDetection(faceDatas, fOutlierThreshold); |
|
} |
|
#endif |
|
return true; |
|
} |
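
// Summary of the per-face quality score built above: for every view, each pixel that rasterizes to
// a face adds the blurred Sobel gradient magnitude at that pixel to faceData.quality, so the score
// grows with projected area and local image sharpness; the per-view score is then multiplied by
// the squared cosine of the angle between the viewing direction and the (smoothed) face normal,
// favoring head-on observations. The accumulated color is averaged over the projected area and
// converted to YCbCr so that FaceOutlierDetection() can compare views in a luminance/chroma space.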
|
|
|
bool MeshTexture::CheckInvalidFaces(FaceDataViewArr& facesDatas, float fOutlierThreshold, int nIgnoreMaskLabel, const IIndexArr& _views, bool bUseVirtualFaces) |
|
{ |
|
// create faces octree |
|
Mesh::Octree octree; |
|
Mesh::FacesInserter::CreateOctree(octree, scene.mesh); |
|
|
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp critical(invalid_faces_access) |
|
#endif |
|
{ |
|
// scene.mesh.invalidFaces.clear(); |
|
} |
|
|
|
// extract array of faces viewed by each image |
|
IIndexArr views(_views); |
|
if (views.empty()) { |
|
views.resize(images.size()); |
|
std::iota(views.begin(), views.end(), IIndex(0)); |
|
} |
|
// facesDatas.resize(faces.size()); |
|
Util::Progress progress(_T("Initialized views"), views.size()); |
|
typedef float real; |
|
TImage<real> imageGradMag; |
|
TImage<real>::EMat mGrad[2]; |
|
FaceMap faceMap; |
|
DepthMap depthMap; |
|
#ifdef TEXOPT_USE_OPENMP |
|
bool bAbort(false); |
|
#pragma omp parallel for private(imageGradMag, mGrad, faceMap, depthMap) |
|
for (int_t idx=0; idx<(int_t)views.size(); ++idx) { |
|
|
|
#pragma omp flush (bAbort) |
|
if (bAbort) { |
|
++progress; |
|
continue; |
|
} |
|
const IIndex idxView(views[(IIndex)idx]); |
|
#else |
|
for (IIndex idxView: views) { |
|
#endif |
|
Image& imageData = images[idxView]; |
|
if (!imageData.IsValid()) { |
|
++progress; |
|
continue; |
|
} |
|
|
|
std::string strPath = imageData.name;
size_t lastSlash = strPath.find_last_of("/\\");
if (lastSlash == std::string::npos) lastSlash = 0; // no path separator: start from the beginning
else lastSlash++; // skip the separator

// locate the extension separator '.'
size_t lastDot = strPath.find_last_of('.');
if (lastDot == std::string::npos) lastDot = strPath.size(); // no extension: take everything to the end

// extract the bare file name (no path, no extension)
std::string strName = strPath.substr(lastSlash, lastDot - lastSlash);
|
|
|
// load image |
|
unsigned level(nResolutionLevel); |
|
const unsigned imageSize(imageData.RecomputeMaxResolution(level, nMinResolution)); |
|
if ((imageData.image.empty() || MAXF(imageData.width,imageData.height) != imageSize) && !imageData.ReloadImage(imageSize)) { |
|
#ifdef TEXOPT_USE_OPENMP |
|
bAbort = true; |
|
#pragma omp flush (bAbort) |
|
continue; |
|
#else |
|
return false; |
|
#endif |
|
} |
|
imageData.UpdateCamera(scene.platforms); |
|
// compute gradient magnitude |
|
imageData.image.toGray(imageGradMag, cv::COLOR_BGR2GRAY, true); |
|
cv::Mat grad[2]; |
|
mGrad[0].resize(imageGradMag.rows, imageGradMag.cols); |
|
grad[0] = cv::Mat(imageGradMag.rows, imageGradMag.cols, cv::DataType<real>::type, (void*)mGrad[0].data()); |
|
mGrad[1].resize(imageGradMag.rows, imageGradMag.cols); |
|
grad[1] = cv::Mat(imageGradMag.rows, imageGradMag.cols, cv::DataType<real>::type, (void*)mGrad[1].data()); |
|
#if 1 |
|
cv::Sobel(imageGradMag, grad[0], cv::DataType<real>::type, 1, 0, 3, 1.0/8.0); |
|
cv::Sobel(imageGradMag, grad[1], cv::DataType<real>::type, 0, 1, 3, 1.0/8.0); |
|
#elif 1 |
|
const TMatrix<real,3,5> kernel(CreateDerivativeKernel3x5()); |
|
cv::filter2D(imageGradMag, grad[0], cv::DataType<real>::type, kernel); |
|
cv::filter2D(imageGradMag, grad[1], cv::DataType<real>::type, kernel.t()); |
|
#else |
|
const TMatrix<real,5,7> kernel(CreateDerivativeKernel5x7()); |
|
cv::filter2D(imageGradMag, grad[0], cv::DataType<real>::type, kernel); |
|
cv::filter2D(imageGradMag, grad[1], cv::DataType<real>::type, kernel.t()); |
|
#endif |
|
(TImage<real>::EMatMap)imageGradMag = (mGrad[0].cwiseAbs2()+mGrad[1].cwiseAbs2()).cwiseSqrt(); |
|
// apply some blur on the gradient to lower noise/glossiness effects onto face-quality score |
|
cv::GaussianBlur(imageGradMag, imageGradMag, cv::Size(15, 15), 0, 0, cv::BORDER_DEFAULT); |
|
// select faces inside view frustum |
|
Mesh::FaceIdxArr cameraFaces; |
|
Mesh::FacesInserter inserter(cameraFaces); |
|
const TFrustum<float,5> frustum(Matrix3x4f(imageData.camera.P), (float)imageData.width, (float)imageData.height); |
|
octree.Traverse(frustum, inserter); |
|
// project all triangles in this view and keep the closest ones |
|
faceMap.create(imageData.GetSize()); |
|
depthMap.create(imageData.GetSize()); |
|
|
|
std::unordered_set<FIndex> tempFaces; |
|
{ |
|
// RasterMesh rasterer(vertices, imageData.camera, depthMap, faceMap); |
|
RasterMesh rasterer(*this, vertices, imageData.camera, depthMap, faceMap, scene.mesh, false); |
|
RasterMesh::Triangle triangle; |
|
RasterMesh::TriangleRasterizer triangleRasterizer(triangle, rasterer); |
|
if (nIgnoreMaskLabel >= 0) { |
|
// import mask |
|
BitMatrix bmask; |
|
// std::cout << "nIgnoreMaskLabel is open" << std::endl; |
|
DepthEstimator::ImportIgnoreMask(imageData, imageData.GetSize(), (uint16_t)OPTDENSE::nIgnoreMaskLabel, bmask, &rasterer.mask); |
|
} else if (nIgnoreMaskLabel == -1) { |
|
// creating mask to discard invalid regions created during image radial undistortion |
|
rasterer.mask = DetectInvalidImageRegions(imageData.image); |
|
#if TD_VERBOSE != TD_VERBOSE_OFF |
|
if (VERBOSITY_LEVEL > 2) |
|
cv::imwrite(String::FormatString("umask%04d.png", idxView), rasterer.mask); |
|
#endif |
|
} |
|
rasterer.Clear(); |
|
for (FIndex idxFace : cameraFaces) { |
|
rasterer.validFace = true; |
|
const Face& facet = faces[idxFace]; |
|
rasterer.idxFace = idxFace; |
|
|
|
if (scene.is_face_visible(strName.c_str(), idxFace)) |
|
{ |
|
rasterer.Project(facet, triangleRasterizer); |
|
if (!rasterer.validFace) |
|
rasterer.Project(facet, triangleRasterizer); |
|
} |
|
} |
|
|
|
// if (!bUseVirtualFaces) |
|
// if (bUseVirtualFaces) |
|
tempFaces = PerformLocalDepthConsistencyCheck(depthMap, faceMap, scene.mesh, idxView, strName); |
|
} |
|
|
|
// compute the projection area of visible faces |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
CLISTDEF0IDX(uint32_t,FIndex) areas(faces.size()); |
|
areas.Memset(0); |
|
#endif |
|
|
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp critical |
|
#endif |
|
{ |
|
// faceQuality is influenced by : |
|
// + area: the higher the area the more gradient scores will be added to the face quality |
|
// + sharpness: sharper image or image resolution or how close is to the face will result in higher gradient on the same face |
|
// ON GLOSS IMAGES it happens to have a high volatile sharpness depending on how the light reflects under different angles |
|
// + angle: low angle increases the surface area |
|
for (int j=0; j<faceMap.rows; ++j) { |
|
for (int i=0; i<faceMap.cols; ++i) { |
|
const FIndex& idxFace = faceMap(j,i); |
|
// ASSERT((idxFace == NO_ID && depthMap(j,i) == 0) || (idxFace != NO_ID && depthMap(j,i) > 0)); |
|
|
|
if (idxFace == NO_ID) |
|
continue; |
|
|
|
FaceDataArr& faceDatas = facesDatas[idxFace]; |
|
|
|
// // if (!(scene.mesh.invalidFacesRelative.data.contains(idxFace) && scene.is_face_visible_relative(idxFace))) |
|
// if (false) |
|
// { |
|
// if (depthMap(j,i)>999.0f) |
|
// { |
|
// // continue; |
|
// // printf("idxFace=%d, depthMap(j,i=%f\n", idxFace, depthMap(j,i)); |
|
// FaceData& faceData = faceDatas.emplace_back(); |
|
// faceData.idxView = idxView; |
|
// faceData.quality = imageGradMag(j,i); |
|
// faceData.bInvalidFacesRelative = true; |
|
// // printf("faceData.quality=%f\n", faceData.quality); |
|
// #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
// faceData.color = imageData.image(j,i); |
|
// #endif |
|
// continue; |
|
// } |
|
// } |
|
|
|
// // if (tempFaces.find(idxFace) == tempFaces.end()) |
|
// // continue; |
|
|
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
uint32_t& area = areas[idxFace]; |
|
if (area++ == 0) |
|
{ |
|
#else |
|
if (faceDatas.empty() || faceDatas.back().idxView != idxView) { |
|
#endif |
|
// create new face-data |
|
FaceData& faceData = faceDatas.emplace_back(); |
|
faceData.idxView = idxView; |
|
faceData.quality = imageGradMag(j,i); |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
faceData.color = imageData.image(j,i); |
|
#endif |
|
if (depthMap(j,i)>999.0f) |
|
faceData.bInvalidFacesRelative = true; |
|
} |
|
else |
|
{ |
|
// update face-data |
|
ASSERT(!faceDatas.empty()); |
|
FaceData& faceData = faceDatas.back(); |
|
ASSERT(faceData.idxView == idxView); |
|
faceData.quality += imageGradMag(j,i); |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
faceData.color += Color(imageData.image(j,i)); |
|
#endif |
|
if (depthMap(j,i)>999.0f) |
|
faceData.bInvalidFacesRelative = true; |
|
} |
|
} |
|
} |
|
// adjust face quality with camera angle relative to face normal |
|
// tries to increase chances of a camera with perpendicular view on the surface (smoothened normals) to be selected |
|
FOREACH(idxFace, facesDatas) { |
|
FaceDataArr& faceDatas = facesDatas[idxFace]; |
|
if (faceDatas.empty() || faceDatas.back().idxView != idxView) |
|
continue; |
|
const Face& f = faces[idxFace]; |
|
const Vertex faceCenter((vertices[f[0]] + vertices[f[1]] + vertices[f[2]]) / 3.f); |
|
const Point3f camDir(Cast<Mesh::Type>(imageData.camera.C) - faceCenter); |
|
const Normal& faceNormal = scene.mesh.faceNormals[idxFace]; |
|
const float cosFaceCam(MAXF(0.001f, ComputeAngle(camDir.ptr(), faceNormal.ptr()))); |
|
faceDatas.back().quality *= SQUARE(cosFaceCam); |
|
} |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
FOREACH(idxFace, areas) { |
|
const uint32_t& area = areas[idxFace]; |
|
if (area > 0) { |
|
Color& color = facesDatas[idxFace].back().color; |
|
color = RGB2YCBCR(Color(color * (1.f/(float)area))); |
|
} |
|
} |
|
#endif |
|
} |
|
++progress; |
|
} |
|
#ifdef TEXOPT_USE_OPENMP |
|
if (bAbort) |
|
return false; |
|
#endif |
|
progress.close(); |
|
|
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
if (fOutlierThreshold > 0) { |
|
// try to detect outlier views for each face |
|
// (views for which the face is occluded by a dynamic object in the scene, ex. pedestrians) |
|
for (FaceDataArr& faceDatas: facesDatas) |
|
FaceOutlierDetection(faceDatas, fOutlierThreshold); |
|
} |
|
#endif |
|
return true; |
|
} |
|
|
|
bool MeshTexture::IsFaceVisibleAndValid(const FaceDataArr& faceDatas, const IIndexArr& selectedCams) const { |
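// true only if, for every camera in selectedCams, this face has an observation that is not flagged bInvalidFacesRelative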
|
for (IIndex camID : selectedCams) { |
|
bool valid = false; |
|
for (const FaceData& fd : faceDatas) { |
|
if (fd.idxView == camID && !fd.bInvalidFacesRelative) { |
|
valid = true; |
|
break; |
|
} |
|
} |
|
if (!valid) return false; // this selected camera has no valid (unoccluded) observation of the face
|
} |
|
return true; |
|
} |
|
|
|
//* |
|
// local depth-consistency check added to MeshTexture: flags faces whose rendered depth deviates from the local neighborhood in this view
|
std::unordered_set<FIndex> MeshTexture::PerformLocalDepthConsistencyCheck(DepthMap& depthMap, FaceMap& faceMap, Mesh& mesh, IIndex idxView, std::string strViewName) |
|
{ |
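// returns the set of faces that appear in faceMap for this view; as a side effect, pixels whose
// depth deviates from the local window average are marked, the marked region is dilated, the
// affected faces are inserted into mesh.invalidFaces and their depth is set to the 1000.0f sentinel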
|
// mesh.invalidFaces.clear(); |
|
|
|
std::unordered_set<FIndex> tempFaces; |
|
|
|
// depth-consistency thresholds (tune per scene)

const float depthThreshold = 0.005f; // absolute depth threshold in meters (values tried: 0.1f, 0.01f, 0.005f, 0.001f)

const float relativeThreshold = 0.1f; // relative depth threshold (currently unused in the active check below)

const int kernelSize = 30; // size of the local detection window
|
const int halfKernel = kernelSize / 2; |
|
|
|
// consistency mask: 255 marks depth-inconsistent pixels
|
Image8U consistencyMask(depthMap.size()); |
|
consistencyMask.memset(0); |
|
|
|
for (int r = 0; r < depthMap.rows; ++r) { |
|
for (int c = 0; c < depthMap.cols; ++c) { |
|
const FIndex idxFace = faceMap(r, c); |
|
if (idxFace == NO_ID || idxFace >= mesh.faces.size()) { |
|
continue; |
|
} |
|
|
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp critical |
|
#endif |
|
{ |
|
tempFaces.insert(idxFace); |
|
} |
|
} |
|
} |
|
|
|
// work on a copy of the depth map so the original is not modified during the check
|
DepthMap depthMapCopy = depthMap.clone(); |
|
|
|
int n1 = 0; |
|
int n2 = 0; |
|
int n3 = 0; |
|
int n4 = 0; |
|
|
|
for (int r = 1; r < depthMapCopy.rows - 1; ++r) |
|
for (int c = 1; c < depthMapCopy.cols - 1; ++c) |
|
consistencyMask(r, c) = 0; |
|
|
|
// first pass: mark every pixel whose depth is inconsistent with its local neighborhood
|
for (int r = halfKernel; r < depthMapCopy.rows - halfKernel; ++r) { |
|
for (int c = halfKernel; c < depthMapCopy.cols - halfKernel; ++c) { |
|
const Depth centerDepth = depthMapCopy(r, c); |
|
// const Depth centerDepth = depthMap(r, c); |
|
|
|
if (centerDepth <= 0.0f) |
|
{ |
|
++n1; |
|
// consistencyMask(r, c) = 255; // 标记为不一致 |
|
continue; |
|
} |
|
else |
|
++n2; |
|
|
|
// consistencyMask(r, c) = 255; // 标记为不一致 |
|
// continue; |
|
|
|
// if (centerDepth>0.0f) |
|
// printf("1Test Depth=%f\n", centerDepth); |
|
// compute the average depth over the local window
|
float sum = 0.0f; |
|
int count = 0; |
|
for (int dr = -halfKernel; dr <= halfKernel; ++dr) { |
|
for (int dc = -halfKernel; dc <= halfKernel; ++dc) { |
|
const Depth neighborDepth = depthMapCopy(r+dr, c+dc); |
|
if (neighborDepth > 0.0f) { |
|
sum += neighborDepth; |
|
count++; |
|
} |
|
} |
|
} |
|
|
|
// if (count < 4) // require at least 4 valid neighbors
|
// continue; |
|
|
|
const float avgDepth = sum / count; |
|
const float absDiff = std::abs(centerDepth - avgDepth); |
|
const float relDiff = absDiff / avgDepth; |
|
|
|
// printf("2Test %f, %f, %f, %f, %f, %f\n", centerDepth, avgDepth, absDiff, depthThreshold, relDiff, relativeThreshold); |
|
// check whether the deviation exceeds the threshold
|
// if (absDiff > depthThreshold || relDiff > relativeThreshold) |
|
if (absDiff > depthThreshold) |
|
{ |
|
// printf("consistencyMask %d, %d\n", r, c); |
|
consistencyMask(r, c) = 255; // mark as inconsistent
|
++n3; |
|
} |
|
} |
|
} |
|
|
|
// dilated copy of the mask
|
Image8U dilatedMask = consistencyMask.clone(); |
|
|
|
// dilate only around the originally inconsistent pixels
|
for (int r = 1; r < depthMapCopy.rows - 1; ++r) { |
|
for (int c = 1; c < depthMapCopy.cols - 1; ++c) { |
|
if (consistencyMask(r, c) != 255) // process only originally inconsistent pixels
|
continue; |
|
|
|
n4++; |
|
// grow the marked region by a fixed radius (9 px; 2 px was also tried)
|
for (int dr = -9; dr <= 9; ++dr) { |
|
for (int dc = -9; dc <= 9; ++dc) { |
|
const int nr = r + dr; |
|
const int nc = c + dc; |
|
|
|
// stay inside the image bounds
|
if (dilatedMask.isInside(ImageRef(nc, nr))) { |
|
dilatedMask(nr, nc) = 255; |
|
} |
|
} |
|
} |
|
} |
|
} |
|
|
|
consistencyMask = dilatedMask; |
|
|
|
// printf("n1=%d, n2=%d, n3=%d, n4=%d\n", n1, n2, n3, n4); |
|
|
|
for (int r = 0; r < depthMap.rows; ++r) { |
|
for (int c = 0; c < depthMap.cols; ++c) { |
|
const FIndex idxFace = faceMap(r, c); |
|
if (consistencyMask(r, c) == 255) |
|
{ |
|
if (idxFace == NO_ID || idxFace >= mesh.faces.size()) { |
|
continue; |
|
} |
|
|
|
{ |
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp critical |
|
#endif |
|
{ |
|
std::lock_guard<std::mutex> lock(*mesh.invalidFaces.mtx); |
|
mesh.invalidFaces.data.insert(idxFace); // flag the face as invalid
|
// mesh.invalidFacesAll[idxView].data.insert(idxFace); |
|
// std::lock_guard<std::mutex> lock2(*mesh.invalidFacesRelative.mtx); |
|
// mesh.invalidFacesRelative.data.insert(idxFace); // flag the face as invalid
|
} |
|
} |
|
} |
|
else |
|
{ |
|
// std::lock_guard<std::mutex> lock(*mesh.invalidFacesRelative.mtx); |
|
// if (mesh.invalidFacesRelative.Contains(idxFace)) |
|
{ |
|
// mesh.invalidFacesRelative.Remove(idxFace); |
|
} |
|
} |
|
} |
|
} |
|
|
|
// third pass: invalidate the depth of inconsistent regions (the original depthMap can now be modified safely)
|
for (int r = 0; r < depthMap.rows; ++r) { |
|
for (int c = 0; c < depthMap.cols; ++c) { |
|
if (consistencyMask(r, c) == 255) |
|
{ |
|
const FIndex idxFace = faceMap(r, c); |
|
if (idxFace == NO_ID || idxFace >= mesh.faces.size()) { |
|
continue; |
|
} |
|
// if (!scene.is_face_visible(strViewName.c_str(), faceMap(r, c))) |
|
{ |
|
// depthMap(r, c) = 0; |
|
depthMap(r, c) = 1000.0f; |
|
// printf("depthMap(r, c)=%f, r=%d, c=%d\n",depthMap(r, c), r, c); |
|
// faceMap(r, c) = NO_ID; |
|
// printf("depthMap(r, c)=%f\n", depthMap(r, c)); |
|
} |
|
} |
|
|
|
} |
|
} |
|
|
|
// optional: save the consistency mask for debugging
|
#if TD_VERBOSE != TD_VERBOSE_OFF |
|
if (VERBOSITY_LEVEL > 2) { |
|
static int counter = 0; |
|
// cv::imwrite(String::FormatString("depth_consistency_%04d.png", counter++), consistencyMask); |
|
} |
|
#endif |
|
|
|
return tempFaces; |
|
} |
|
//*/ |
|
|
|
/* |
|
void MeshTexture::PerformLocalDepthConsistencyCheck(DepthMap& depthMap, FaceMap& faceMap, Mesh& mesh) { |
|
// parameters

const float depthThreshold = 0.05f; // absolute depth threshold

const float relativeThreshold = 0.05f; // relative depth threshold

const int kernelSize = 30; // size of the local detection window
|
const int halfKernel = kernelSize / 2; |
|
|
|
// consistency mask marking depth-inconsistent pixels

Image8U consistencyMask(depthMap.size());

consistencyMask.memset(0);

// use an integral image to speed up the local-average computation

DepthMap integralMap;

cv::integral(depthMap, integralMap, CV_32F);

// first pass: mark every depth-inconsistent pixel
|
for (int r = halfKernel; r < depthMap.rows - halfKernel; ++r) { |
|
for (int c = halfKernel; c < depthMap.cols - halfKernel; ++c) { |
|
const Depth centerDepth = depthMap(r, c); |
|
if (centerDepth <= 0.0f) continue; |
|
|
|
// compute the local average from the integral image
|
const float sum = integralMap(r+halfKernel+1, c+halfKernel+1) |
|
- integralMap(r-halfKernel, c+halfKernel+1) |
|
- integralMap(r+halfKernel+1, c-halfKernel) |
|
+ integralMap(r-halfKernel, c-halfKernel); |
|
const int count = kernelSize * kernelSize; |
|
const float avgDepth = sum / count; |
|
|
|
// absolute and relative differences

const float absDiff = std::abs(centerDepth - avgDepth);

const float relDiff = absDiff / avgDepth;

// combine the absolute and relative thresholds
|
if (absDiff > depthThreshold && relDiff > relativeThreshold) { |
|
// consistencyMask(r, c) = 255; |
|
} |
|
} |
|
} |
|
|
|
// expand the inconsistent region with a morphological dilation
|
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 5)); |
|
cv::dilate(consistencyMask, consistencyMask, kernel); |
|
|
|
// mark the faces under inconsistent pixels as invalid
|
std::unordered_set<FIndex> invalidFaces; |
|
for (int r = 0; r < depthMap.rows; ++r) { |
|
for (int c = 0; c < depthMap.cols; ++c) { |
|
if (consistencyMask(r, c) == 255) { |
|
const FIndex idxFace = faceMap(r, c); |
|
if (idxFace != NO_ID) { |
|
// invalidFaces.insert(idxFace); |
|
} |
|
// invalidate the inconsistent pixel
|
depthMap(r, c) = 0; |
|
faceMap(r, c) = NO_ID; |
|
} |
|
} |
|
} |
|
|
|
// update the mesh's set of invalid faces
|
for (FIndex idxFace : invalidFaces) { |
|
// mesh.invalidFaces.insert(idxFace); |
|
} |
|
} |
|
//*/ |
|
|
|
// order the camera views by score, highest first, and return the first <minCommonCameras> cameras

// ratioAngleToQuality is the weight with which the normal angle is combined with the view quality to obtain the selection score:

// - a ratio of 1 means only the angle is considered

// - a ratio of 0.5 means angle and quality are equally important

// - a ratio of 0 means only the view quality is considered when sorting
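// (views flagged bInvalidFacesRelative are skipped; if no valid view remains, the single highest-quality view is returned)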
|
IIndexArr MeshTexture::SelectBestViews(const FaceDataArr& faceDatas, FIndex fid, unsigned minCommonCameras, float ratioAngleToQuality) const |
|
{ |
|
ASSERT(!faceDatas.empty()); |
|
#if 1 |
|
|
|
//* |
|
CLISTDEF0IDX(FaceData,IIndex) validFaceDatas; |
|
for (const FaceData& fd : faceDatas) { |
|
if (!fd.bInvalidFacesRelative) { // skip views flagged as invalid
|
validFaceDatas.emplace_back(fd); |
|
} |
|
} |
|
if (validFaceDatas.empty()) { |
|
// no valid view: fall back to the highest-quality view (even if flagged invalid)
|
float maxQuality = -1; |
|
IIndex bestView = NO_ID; |
|
for (const FaceData& fd : faceDatas) { |
|
if (fd.quality > maxQuality) { |
|
maxQuality = fd.quality; |
|
bestView = fd.idxView; |
|
} |
|
} |
|
return (bestView != NO_ID) ? IIndexArr{bestView} : IIndexArr(); |
|
} |
|
|
|
|
|
//*/ |
|
|
|
// compute scores based on the view quality and its angle to the face normal |
|
float maxQuality = 0; |
|
for (const FaceData& faceData: validFaceDatas) |
|
maxQuality = MAXF(maxQuality, faceData.quality); |
|
const Face& f = faces[fid]; |
|
const Vertex faceCenter((vertices[f[0]] + vertices[f[1]] + vertices[f[2]]) / 3.f); |
|
CLISTDEF0IDX(float,IIndex) scores(validFaceDatas.size()); |
|
FOREACH(idxFaceData, validFaceDatas) { |
|
const FaceData& faceData = validFaceDatas[idxFaceData]; |
|
const Image& imageData = images[faceData.idxView]; |
|
const Point3f camDir(Cast<Mesh::Type>(imageData.camera.C) - faceCenter); |
|
const Normal& faceNormal = scene.mesh.faceNormals[fid]; |
|
const float cosFaceCam(ComputeAngle(camDir.ptr(), faceNormal.ptr())); |
|
scores[idxFaceData] = ratioAngleToQuality*cosFaceCam + (1.f-ratioAngleToQuality)*faceData.quality/maxQuality; |
|
} |
|
// and sort the scores from highest to smallest to get the best overall cameras
|
IIndexArr scorePodium(validFaceDatas.size()); |
|
std::iota(scorePodium.begin(), scorePodium.end(), 0); |
|
scorePodium.Sort([&scores](IIndex i, IIndex j) { |
|
return scores[i] > scores[j]; |
|
}); |
|
|
|
#else |
|
|
|
// sort qualityPodium in relation to faceDatas[index].quality decreasing |
|
IIndexArr qualityPodium(faceDatas.size()); |
|
std::iota(qualityPodium.begin(), qualityPodium.end(), 0); |
|
qualityPodium.Sort([&faceDatas](IIndex i, IIndex j) { |
|
return faceDatas[i].quality > faceDatas[j].quality; |
|
}); |
|
|
|
// sort anglePodium in relation to face angle to camera increasing |
|
const Face& f = faces[fid]; |
|
const Vertex faceCenter((vertices[f[0]] + vertices[f[1]] + vertices[f[2]]) / 3.f); |
|
CLISTDEF0IDX(float,IIndex) cameraAngles(0, faceDatas.size()); |
|
for (const FaceData& faceData: faceDatas) { |
|
const Image& imageData = images[faceData.idxView]; |
|
const Point3f camDir(Cast<Mesh::Type>(imageData.camera.C) - faceCenter); |
|
const Normal& faceNormal = scene.mesh.faceNormals[fid]; |
|
const float cosFaceCam(ComputeAngle(camDir.ptr(), faceNormal.ptr())); |
|
cameraAngles.emplace_back(cosFaceCam); |
|
} |
|
IIndexArr anglePodium(faceDatas.size()); |
|
std::iota(anglePodium.begin(), anglePodium.end(), 0); |
|
anglePodium.Sort([&cameraAngles](IIndex i, IIndex j) { |
|
return cameraAngles[i] > cameraAngles[j]; |
|
}); |
|
|
|
// combine podium scores to get overall podium |
|
// and sort the scores from smallest to highest to get the best overall camera for the current virtual face
|
CLISTDEF0IDX(float,IIndex) scores(faceDatas.size()); |
|
scores.Memset(0); |
|
FOREACH(sIdx, faceDatas) { |
|
scores[anglePodium[sIdx]] += ratioAngleToQuality * (sIdx+1); |
|
scores[qualityPodium[sIdx]] += (1.f - ratioAngleToQuality) * (sIdx+1); |
|
} |
|
IIndexArr scorePodium(faceDatas.size()); |
|
std::iota(scorePodium.begin(), scorePodium.end(), 0); |
|
scorePodium.Sort([&scores](IIndex i, IIndex j) { |
|
return scores[i] < scores[j]; |
|
}); |
|
|
|
#endif |
|
IIndexArr cameras(MIN(minCommonCameras, validFaceDatas.size())); |
|
FOREACH(i, cameras) |
|
cameras[i] = validFaceDatas[scorePodium[i]].idxView; |
|
return cameras; |
|
} |
|
|
|
IIndexArr MeshTexture::SelectBestView(const FaceDataArr& faceDatas, FIndex fid, unsigned minCommonCameras, float ratioAngleToQuality) const |
|
{ |
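// simplified selection: return just the single highest-quality view (fid, minCommonCameras and ratioAngleToQuality are currently unused)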
|
float maxQuality = -1; |
|
IIndex bestView = NO_ID; |
|
for (const FaceData& fd : faceDatas) { |
|
if (fd.quality > maxQuality) { |
|
maxQuality = fd.quality; |
|
bestView = fd.idxView; |
|
} |
|
} |
|
|
|
return (bestView != NO_ID) ? IIndexArr{bestView} : IIndexArr(); |
|
} |
|
|
|
static bool IsFaceVisible(const MeshTexture::FaceDataArr& faceDatas, const IIndexArr& cameraList) { |
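// true if the face has an observation from every camera in cameraList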
|
size_t camFoundCounter(0); |
|
for (const MeshTexture::FaceData& faceData : faceDatas) { |
|
const IIndex cfCam = faceData.idxView; |
|
for (IIndex camId : cameraList) { |
|
if (cfCam == camId) { |
|
if (++camFoundCounter == cameraList.size()) |
|
return true; |
|
break; |
|
} |
|
} |
|
} |
|
return camFoundCounter == cameraList.size(); |
|
} |
|
|
|
// build virtual faces with: |
|
// - similar normal |
|
// - high percentage of common images that see them |
|
void MeshTexture::CreateVirtualFaces(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras, float thMaxNormalDeviation) const |
|
{ |
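// region growing: pick a random seed face, select its best common cameras, then absorb
// neighboring faces whose normals stay within thMaxNormalDeviation of the seed normal and
// that are seen by all selected cameras; repeat until every face belongs to a virtual face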
|
const float ratioAngleToQuality(0.67f); |
|
const float cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation))); |
|
Mesh::FaceIdxArr remainingFaces(faces.size()); |
|
std::iota(remainingFaces.begin(), remainingFaces.end(), 0); |
|
std::vector<bool> selectedFaces(faces.size(), false); |
|
cQueue<FIndex, FIndex, 0> currentVirtualFaceQueue; |
|
std::unordered_set<FIndex> queuedFaces; |
|
do { |
|
const FIndex startPos = RAND() % remainingFaces.size(); |
|
const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; |
|
ASSERT(currentVirtualFaceQueue.IsEmpty()); |
|
const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; |
|
const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID]; |
|
// select the common cameras |
|
Mesh::FaceIdxArr virtualFace; |
|
FaceDataArr virtualFaceDatas; |
|
if (centerFaceDatas.empty()) { |
|
virtualFace.emplace_back(virtualFaceCenterFaceID); |
|
selectedFaces[virtualFaceCenterFaceID] = true; |
|
const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID); |
|
ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); |
|
remainingFaces.RemoveAtMove(posToErase); |
|
} else { |
|
const IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); |
|
currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); |
|
queuedFaces.clear(); |
|
do { |
|
const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); |
|
currentVirtualFaceQueue.PopHead(); |
|
// check for condition to add in current virtual face |
|
// normal angle smaller than thMaxNormalDeviation degrees |
|
const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; |
|
const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); |
|
if (cosFaceToCenter < cosMaxNormalDeviation) |
|
continue; |
|
// check if current face is seen by all cameras in selectedCams |
|
ASSERT(!selectedCams.empty()); |
|
if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams)) |
|
continue; |
|
// remove it from remaining faces and add it to the virtual face |
|
{ |
|
const auto posToErase = remainingFaces.FindFirst(currentFaceId); |
|
ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); |
|
remainingFaces.RemoveAtMove(posToErase); |
|
selectedFaces[currentFaceId] = true; |
|
virtualFace.push_back(currentFaceId); |
|
} |
|
// add all new neighbors to the queue |
|
const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId]; |
|
for (int i = 0; i < 3; ++i) { |
|
const FIndex fIdx = ffaces[i]; |
|
if (fIdx == NO_ID) |
|
continue; |
|
if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) { |
|
currentVirtualFaceQueue.AddTail(fIdx); |
|
queuedFaces.emplace(fIdx); |
|
} |
|
} |
|
} while (!currentVirtualFaceQueue.IsEmpty()); |
|
// compute virtual face quality and create virtual face |
|
for (IIndex idxView: selectedCams) { |
|
FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); |
|
virtualFaceData.quality = 0; |
|
virtualFaceData.idxView = idxView; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color = Point3f::ZERO; |
|
#endif |
|
unsigned processedFaces(0); |
|
for (FIndex fid : virtualFace) { |
|
const FaceDataArr& faceDatas = facesDatas[fid]; |
|
for (const FaceData& faceData: faceDatas) {
|
if (faceData.idxView == idxView) { |
|
virtualFaceData.quality += faceData.quality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color += faceData.color; |
|
#endif |
|
++processedFaces; |
|
break; |
|
} |
|
} |
|
} |
|
ASSERT(processedFaces > 0); |
|
virtualFaceData.quality /= processedFaces; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color /= processedFaces; |
|
#endif |
|
} |
|
ASSERT(!virtualFaceDatas.empty()); |
|
} |
|
virtualFacesDatas.emplace_back(std::move(virtualFaceDatas)); |
|
virtualFaces.emplace_back(std::move(virtualFace)); |
|
} while (!remainingFaces.empty()); |
|
} |
|
|
|
void MeshTexture::CreateVirtualFaces2(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, |
|
VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras, |
|
const Mesh::FaceIdxArr& faceIndices, float thMaxNormalDeviation) const |
|
{ |
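// variant of CreateVirtualFaces restricted to the faces listed in faceIndices, with extensive
// bounds checking, iteration caps and VERBOSE logging to help track down invalid indices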
|
// use the project logging macro
|
VERBOSE("CreateVirtualFaces2: starting with %zu faces", faceIndices.size()); |
|
|
|
const float ratioAngleToQuality(0.67f); |
|
const float cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation))); |
|
Mesh::FaceIdxArr remainingFaces(faceIndices); // process only the supplied faces
|
|
|
// validate the input face indices
|
for (FIndex idx : remainingFaces) { |
|
if (idx >= faces.size()) { |
|
VERBOSE("Invalid face index in input: %u (max: %zu)", idx, faces.size()); |
|
return; |
|
} |
|
} |
|
|
|
std::vector<bool> selectedFaces(faces.size(), false); |
|
cQueue<FIndex, FIndex, 0> currentVirtualFaceQueue; |
|
std::unordered_set<FIndex> queuedFaces; |
|
|
|
// nothing to do if no faces were supplied
|
if (remainingFaces.empty()) { |
|
VERBOSE("CreateVirtualFaces2: no faces to process"); |
|
return; |
|
} |
|
|
|
size_t iteration = 0; |
|
const size_t MAX_ITERATIONS = 1000000; // guard against infinite loops
|
|
|
do { |
|
iteration++; |
|
if (iteration > MAX_ITERATIONS) { |
|
VERBOSE("CreateVirtualFaces2: exceeded max iterations (%zu)", MAX_ITERATIONS); |
|
break; |
|
} |
|
|
|
// stop when no faces remain
|
if (remainingFaces.empty()) { |
|
VERBOSE("CreateVirtualFaces2: no more faces to process"); |
|
break; |
|
} |
|
|
|
const FIndex startPos = RAND() % remainingFaces.size(); |
|
const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; |
|
|
|
// validate the center face ID
|
if (virtualFaceCenterFaceID >= faces.size()) { |
|
VERBOSE("Invalid center face ID: %u (max: %zu)", virtualFaceCenterFaceID, faces.size()); |
|
remainingFaces.RemoveAtMove(startPos); |
|
continue; |
|
} |
|
|
|
VERBOSE("Processing virtual face center: %u (iteration %zu)", virtualFaceCenterFaceID, iteration); |
|
|
|
ASSERT(currentVirtualFaceQueue.IsEmpty()); |
|
const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; |
|
|
|
// validate the face-data index
|
if (virtualFaceCenterFaceID >= facesDatas.size()) { |
|
VERBOSE("Face data index out of bounds: %u (max: %zu)", virtualFaceCenterFaceID, facesDatas.size()); |
|
remainingFaces.RemoveAtMove(startPos); |
|
continue; |
|
} |
|
|
|
const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID]; |
|
|
|
// select the common cameras
|
Mesh::FaceIdxArr virtualFace; |
|
FaceDataArr virtualFaceDatas; |
|
|
|
if (centerFaceDatas.empty()) { |
|
VERBOSE("Center face %u has no view data", virtualFaceCenterFaceID); |
|
virtualFace.emplace_back(virtualFaceCenterFaceID); |
|
selectedFaces[virtualFaceCenterFaceID] = true; |
|
const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID); |
|
if (posToErase == Mesh::FaceIdxArr::NO_INDEX) { |
|
VERBOSE("Face %u not found in remaining faces", virtualFaceCenterFaceID); |
|
} else { |
|
remainingFaces.RemoveAtMove(posToErase); |
|
} |
|
} else { |
|
const IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); |
|
|
|
// validate the selected camera indices
|
for (IIndex camIdx : selectedCams) { |
|
if (camIdx >= images.size()) { |
|
VERBOSE("Invalid camera index: %u (max: %zu)", camIdx, images.size()); |
|
return; |
|
} |
|
} |
|
|
|
currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); |
|
queuedFaces.clear(); |
|
queuedFaces.insert(virtualFaceCenterFaceID); |
|
|
|
size_t queueIteration = 0; |
|
const size_t MAX_QUEUE_ITERATIONS = 100000; // guard against infinite loops
|
|
|
do { |
|
queueIteration++; |
|
if (queueIteration > MAX_QUEUE_ITERATIONS) { |
|
VERBOSE("Queue processing exceeded max iterations (%zu)", MAX_QUEUE_ITERATIONS); |
|
break; |
|
} |
|
|
|
if (currentVirtualFaceQueue.IsEmpty()) { |
|
VERBOSE("Queue is empty"); |
|
break; |
|
} |
|
|
|
const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); |
|
currentVirtualFaceQueue.PopHead(); |
|
|
|
// validate the current face ID
|
if (currentFaceId >= faces.size()) { |
|
VERBOSE("Invalid current face ID: %u (max: %zu)", currentFaceId, faces.size()); |
|
continue; |
|
} |
|
|
|
VERBOSE("Processing neighbor face: %u", currentFaceId); |
|
|
|
// check the normal angle against the center face
|
const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; |
|
const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); |
|
|
|
if (cosFaceToCenter < cosMaxNormalDeviation) { |
|
VERBOSE("Face %u normal angle too large (cos: %f)", currentFaceId, cosFaceToCenter); |
|
continue; |
|
} |
|
|
|
// check that the current face is seen by all cameras in selectedCams
|
if (selectedCams.empty()) { |
|
VERBOSE("Selected cameras list is empty for face %u", currentFaceId); |
|
continue; |
|
} |
|
|
|
// validate the face-data index
|
if (currentFaceId >= facesDatas.size()) { |
|
VERBOSE("Face data index out of bounds: %u (max: %zu)", currentFaceId, facesDatas.size()); |
|
continue; |
|
} |
|
|
|
if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams)) { |
|
VERBOSE("Face %u not visible by all selected cameras", currentFaceId); |
|
continue; |
|
} |
|
|
|
// remove it from the remaining faces and add it to the virtual face
|
const auto posToErase = remainingFaces.FindFirst(currentFaceId); |
|
if (posToErase == Mesh::FaceIdxArr::NO_INDEX) { |
|
VERBOSE("Face %u already processed", currentFaceId); |
|
} else { |
|
remainingFaces.RemoveAtMove(posToErase); |
|
selectedFaces[currentFaceId] = true; |
|
virtualFace.push_back(currentFaceId); |
|
VERBOSE("Added face %u to virtual face (total: %zu)", currentFaceId, virtualFace.size()); |
|
} |
|
|
|
// add all new neighbors to the queue
|
if (currentFaceId >= faceFaces.size()) { |
|
VERBOSE("FaceFaces index out of bounds: %u (max: %zu)", currentFaceId, faceFaces.size()); |
|
continue; |
|
} |
|
|
|
const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId]; |
|
for (int i = 0; i < 3; ++i) { |
|
const FIndex fIdx = ffaces[i]; |
|
if (fIdx == NO_ID) |
|
continue; |
|
|
|
// validate the neighbor face ID
|
if (fIdx >= faces.size()) { |
|
VERBOSE("Invalid neighbor face ID: %u (max: %zu)", fIdx, faces.size()); |
|
continue; |
|
} |
|
|
|
if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) { |
|
currentVirtualFaceQueue.AddTail(fIdx); |
|
queuedFaces.emplace(fIdx); |
|
VERBOSE("Queued neighbor face: %u", fIdx); |
|
} |
|
} |
|
} while (!currentVirtualFaceQueue.IsEmpty()); |
|
|
|
// compute the virtual-face quality and create the virtual face
|
for (IIndex idxView : selectedCams) { |
|
// validate the view index
|
if (idxView >= images.size()) { |
|
VERBOSE("Invalid view index: %u (max: %zu)", idxView, images.size()); |
|
continue; |
|
} |
|
|
|
FaceData virtualFaceData; |
|
virtualFaceData.quality = 0; |
|
virtualFaceData.idxView = idxView; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color = Point3f::ZERO; |
|
#endif |
|
unsigned processedFaces = 0; |
|
|
|
for (FIndex fid : virtualFace) { |
|
// validate the face ID
|
if (fid >= facesDatas.size()) { |
|
VERBOSE("Invalid face ID in virtual face: %u (max: %zu)", fid, facesDatas.size()); |
|
continue; |
|
} |
|
|
|
const FaceDataArr& faceDatas = facesDatas[fid]; |
|
bool found = false; |
|
|
|
for (const FaceData& faceData : faceDatas) { |
|
if (faceData.idxView == idxView) { |
|
virtualFaceData.quality += faceData.quality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color += faceData.color; |
|
#endif |
|
processedFaces++; |
|
found = true; |
|
break; |
|
} |
|
} |
|
|
|
if (!found) { |
|
VERBOSE("Face %u has no data for view %u", fid, idxView); |
|
} |
|
} |
|
|
|
if (processedFaces > 0) { |
|
virtualFaceData.quality /= processedFaces; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color /= processedFaces; |
|
#endif |
|
virtualFaceDatas.emplace_back(virtualFaceData); |
|
VERBOSE("Added view %u to virtual face with %u faces", idxView, processedFaces); |
|
} else { |
|
VERBOSE("No valid data for view %u in virtual face", idxView); |
|
} |
|
} |
|
|
|
if (virtualFaceDatas.empty()) { |
|
VERBOSE("Virtual face has no valid views"); |
|
} |
|
} |
|
|
|
if (!virtualFace.empty()) { |
|
virtualFacesDatas.emplace_back(std::move(virtualFaceDatas)); |
|
virtualFaces.emplace_back(std::move(virtualFace)); |
|
VERBOSE("Created virtual face with %zu faces and %zu views", |
|
virtualFaces.back().size(), virtualFacesDatas.back().size()); |
|
} else { |
|
VERBOSE("Skipping empty virtual face"); |
|
} |
|
|
|
VERBOSE("Remaining faces: %zu", remainingFaces.size()); |
|
} while (!remainingFaces.empty()); |
|
|
|
VERBOSE("CreateVirtualFaces2: created %zu virtual faces", virtualFaces.size()); |
|
} |
|
|
|
void MeshTexture::CreateVirtualFaces3(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras, float thMaxNormalDeviation) const |
|
{ |
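// variant of CreateVirtualFaces with a curvature-dependent normal threshold (looser on flat
// regions); a virtual-face view is flagged invalid when more than half of its member faces
// are flagged bInvalidFacesRelative for that view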
|
if (meshCurvatures.empty()) { |
|
ComputeFaceCurvatures(); |
|
} |
|
|
|
const float ratioAngleToQuality(0.67f); |
|
const float cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation))); |
|
Mesh::FaceIdxArr remainingFaces(faces.size()); |
|
std::iota(remainingFaces.begin(), remainingFaces.end(), 0); |
|
std::vector<bool> selectedFaces(faces.size(), false); |
|
cQueue<FIndex, FIndex, 0> currentVirtualFaceQueue; |
|
std::unordered_set<FIndex> queuedFaces; |
|
do { |
|
const FIndex startPos = RAND() % remainingFaces.size(); |
|
const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; |
|
|
|
// curvature-dependent normal threshold

const float centerCurvature = meshCurvatures[virtualFaceCenterFaceID];

const float dynamicThreshold = (centerCurvature < 0.2f) ? 15.0f : 8.0f; // curvature < 0.2 counts as a flat region (15 deg), otherwise 8 deg
|
const float dynamicCosTh = COS(FD2R(dynamicThreshold)); |
|
|
|
ASSERT(currentVirtualFaceQueue.IsEmpty()); |
|
const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; |
|
const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID]; |
|
// select the common cameras |
|
Mesh::FaceIdxArr virtualFace; |
|
FaceDataArr virtualFaceDatas; |
|
if (centerFaceDatas.empty()) { |
|
virtualFace.emplace_back(virtualFaceCenterFaceID); |
|
selectedFaces[virtualFaceCenterFaceID] = true; |
|
const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID); |
|
ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); |
|
remainingFaces.RemoveAtMove(posToErase); |
|
} else { |
|
const IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); |
|
currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); |
|
queuedFaces.clear(); |
|
do { |
|
const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); |
|
currentVirtualFaceQueue.PopHead(); |
|
// check for condition to add in current virtual face |
|
// normal angle smaller than thMaxNormalDeviation degrees |
|
const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; |
|
const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); |
|
// if (cosFaceToCenter < cosMaxNormalDeviation) |
|
// continue; |
|
if (cosFaceToCenter < dynamicCosTh) // use the curvature-dependent threshold
|
continue; |
|
// check if current face is seen by all cameras in selectedCams |
|
ASSERT(!selectedCams.empty()); |
|
if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams)) |
|
continue; |
|
|
|
/* |
|
// #ifdef TEXOPT_USE_OPENMP |
|
// #pragma omp critical |
|
// #endif |
|
// std::lock_guard<std::mutex> lock(*scene.mesh.invalidFaces.mtx); |
|
// if (scene.mesh.invalidFaces.data.find(currentFaceId) != scene.mesh.invalidFaces.data.end()) { |
|
// continue; // skip invalid faces
|
// } |
|
|
|
// check that the face is validly seen by all selected cameras
|
if (!IsFaceVisibleAndValid(facesDatas[currentFaceId], selectedCams)) { |
|
continue; |
|
} |
|
//*/ |
|
|
|
// remove it from remaining faces and add it to the virtual face |
|
{ |
|
const auto posToErase = remainingFaces.FindFirst(currentFaceId); |
|
ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); |
|
remainingFaces.RemoveAtMove(posToErase); |
|
selectedFaces[currentFaceId] = true; |
|
virtualFace.push_back(currentFaceId); |
|
} |
|
// add all new neighbors to the queue |
|
const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId]; |
|
for (int i = 0; i < 3; ++i) { |
|
const FIndex fIdx = ffaces[i]; |
|
if (fIdx == NO_ID) |
|
continue; |
|
if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) { |
|
currentVirtualFaceQueue.AddTail(fIdx); |
|
queuedFaces.emplace(fIdx); |
|
} |
|
} |
|
} while (!currentVirtualFaceQueue.IsEmpty()); |
|
// compute virtual face quality and create virtual face |
|
for (IIndex idxView: selectedCams) { |
|
FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); |
|
virtualFaceData.quality = 0; |
|
virtualFaceData.idxView = idxView; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color = Point3f::ZERO; |
|
#endif |
|
unsigned processedFaces(0); |
|
bool bInvalidFacesRelative = false; |
|
int invalidCount = 0; |
|
for (FIndex fid : virtualFace) { |
|
const FaceDataArr& faceDatas = facesDatas[fid]; |
|
for (const FaceData& faceData: faceDatas) {
|
if (faceData.idxView == idxView) { |
|
virtualFaceData.quality += faceData.quality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color += faceData.color; |
|
#endif |
|
++processedFaces; |
|
if (faceData.bInvalidFacesRelative) |
|
++invalidCount; |
|
break; |
|
} |
|
} |
|
} |
|
ASSERT(processedFaces > 0); |
|
virtualFaceData.quality /= processedFaces; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color /= processedFaces; |
|
#endif |
|
virtualFaceData.bInvalidFacesRelative = (invalidCount > processedFaces / 2); |
|
} |
|
ASSERT(!virtualFaceDatas.empty()); |
|
} |
|
virtualFacesDatas.emplace_back(std::move(virtualFaceDatas)); |
|
virtualFaces.emplace_back(std::move(virtualFace)); |
|
} while (!remainingFaces.empty()); |
|
} |
|
|
|
void MeshTexture::CreateVirtualFaces4(const FaceDataViewArr& facesDatas, |
|
FaceDataViewArr& virtualFacesDatas, |
|
VirtualFaceIdxsArr& virtualFaces, |
|
Mesh::FaceIdxArr& mapFaceToVirtualFace, |
|
unsigned minCommonCameras, |
|
float thMaxNormalDeviation) |
|
{ |
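// variant that also fills mapFaceToVirtualFace, caps the size and total number of virtual
// faces, turns leftover faces into single-face virtual faces and finally guarantees that
// every face maps to some virtual face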
|
// initialize data structures
|
if (meshCurvatures.empty()) { |
|
ComputeFaceCurvatures(); |
|
} |
|
|
|
const float ratioAngleToQuality(0.67f); |
|
Mesh::FaceIdxArr remainingFaces(faces.size()); |
|
std::iota(remainingFaces.begin(), remainingFaces.end(), 0); |
|
std::vector<bool> selectedFaces(faces.size(), false); |
|
cQueue<FIndex, FIndex, 0> currentVirtualFaceQueue; |
|
std::unordered_set<FIndex> queuedFaces; |
|
|
|
// face-to-virtual-face mapping (key fix: make sure it is accessed safely)

// Mesh::FaceIdxArr mapFaceToVirtualFace(faces.size());

// mapFaceToVirtualFace.Memset(NO_ID); // initialize to the invalid value
|
|
|
// key parameters: cap the virtual-face size and the total number of virtual faces
|
const size_t MAX_VIRTUAL_FACE_SIZE = 50; |
|
const size_t MAX_TOTAL_VIRTUAL_FACES = 5000; |
|
|
|
// main loop: create the virtual faces

while (!remainingFaces.empty() && virtualFaces.size() < MAX_TOTAL_VIRTUAL_FACES) {

// pick a random seed face
|
const FIndex startPos = RAND() % remainingFaces.size(); |
|
const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; |
|
|
|
// safety check: make sure the face ID is within all valid ranges
|
if (virtualFaceCenterFaceID >= faces.size() || |
|
virtualFaceCenterFaceID >= meshCurvatures.size() || |
|
virtualFaceCenterFaceID >= scene.mesh.faceNormals.size() || |
|
virtualFaceCenterFaceID >= facesDatas.size()) { |
|
// DEBUG_EXTRA("Warning: Invalid center face ID: %u (max faces: %zu)", |
|
// virtualFaceCenterFaceID, faces.size()); |
|
remainingFaces.RemoveAtMove(startPos); |
|
continue; |
|
} |
|
|
|
// curvature-dependent normal threshold
|
const float centerCurvature = meshCurvatures[virtualFaceCenterFaceID]; |
|
const float dynamicThreshold = (centerCurvature < 0.2f) ? 15.0f : 8.0f; |
|
const float dynamicCosTh = COS(FD2R(dynamicThreshold)); |
|
|
|
const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; |
|
const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID]; |
|
|
|
// skip center faces with no view data
|
if (centerFaceDatas.empty()) { |
|
// DEBUG_EXTRA("Warning: Center face %u has no view data", virtualFaceCenterFaceID); |
|
remainingFaces.RemoveAtMove(startPos); |
|
continue; |
|
} |
|
|
|
// select the common cameras
|
IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); |
|
|
|
// require enough common cameras
|
if (selectedCams.size() < minCommonCameras) { |
|
// DEBUG_EXTRA("Warning: Insufficient common cameras for face %u: %u < %u", |
|
// virtualFaceCenterFaceID, selectedCams.size(), minCommonCameras); |
|
remainingFaces.RemoveAtMove(startPos); |
|
continue; |
|
} |
|
|
|
// start a new virtual face
|
Mesh::FaceIdxArr currentVirtualFace; |
|
FaceDataArr currentVirtualFaceDatas; |
|
currentVirtualFace.push_back(virtualFaceCenterFaceID); |
|
selectedFaces[virtualFaceCenterFaceID] = true; |
|
mapFaceToVirtualFace[virtualFaceCenterFaceID] = virtualFaces.size(); |
|
|
|
currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); |
|
queuedFaces.clear(); |
|
queuedFaces.insert(virtualFaceCenterFaceID); |
|
|
|
size_t faceCount = 1; |
|
|
|
// grow the virtual face
|
while (!currentVirtualFaceQueue.IsEmpty() && faceCount < MAX_VIRTUAL_FACE_SIZE) { |
|
const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); |
|
currentVirtualFaceQueue.PopHead(); |
|
|
|
// safety check: make sure the current face ID is within range
|
if (currentFaceId >= faces.size() || |
|
currentFaceId >= scene.mesh.faceNormals.size() || |
|
currentFaceId >= faceFaces.size() || |
|
currentFaceId >= facesDatas.size()) { |
|
// DEBUG_EXTRA("Warning: Invalid current face ID: %u (max faces: %zu)", |
|
// currentFaceId, faces.size()); |
|
continue; |
|
} |
|
|
|
// normal-similarity check
|
const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; |
|
const float dotProduct = normalCenter.dot(faceNormal); |
|
if (dotProduct < dynamicCosTh) { |
|
continue; |
|
} |
|
|
|
// view-validity check: the face must have a valid observation in every selected camera
|
bool bValidInAllCameras = true; |
|
for (IIndex view : selectedCams) { |
|
bool found = false; |
|
for (const FaceData& fd : facesDatas[currentFaceId]) { |
|
if (fd.idxView == view && !fd.bInvalidFacesRelative) { |
|
found = true; |
|
break; |
|
} |
|
} |
|
if (!found) { |
|
bValidInAllCameras = false; |
|
break; |
|
} |
|
} |
|
if (!bValidInAllCameras) continue; |
|
|
|
// add the face to the virtual face
|
if (!selectedFaces[currentFaceId]) { |
|
selectedFaces[currentFaceId] = true; |
|
currentVirtualFace.push_back(currentFaceId); |
|
mapFaceToVirtualFace[currentFaceId] = virtualFaces.size(); |
|
faceCount++; |
|
} |
|
|
|
// enqueue the neighbor faces
|
const Mesh::FaceFaces& adjFaces = faceFaces[currentFaceId]; |
|
for (int i = 0; i < 3; i++) { |
|
const FIndex neighborFace = adjFaces[i]; |
|
if (neighborFace == NO_ID) continue; |
|
|
|
// safety check: make sure the neighbor face ID is within range
|
if (neighborFace >= faces.size()) { |
|
// DEBUG_EXTRA("Warning: Invalid neighbor face ID: %u (max faces: %zu)", |
|
// neighborFace, faces.size()); |
|
continue; |
|
} |
|
|
|
if (!selectedFaces[neighborFace] && queuedFaces.find(neighborFace) == queuedFaces.end()) { |
|
currentVirtualFaceQueue.AddTail(neighborFace); |
|
queuedFaces.insert(neighborFace); |
|
} |
|
} |
|
} |
|
|
|
// keep the virtual face only if it gathered enough faces

if (currentVirtualFace.size() >= minCommonCameras) {

// build the per-view virtual-face data
|
for (IIndex idxView : selectedCams) { |
|
FaceData virtualFaceData; |
|
virtualFaceData.quality = 0.0f; |
|
virtualFaceData.idxView = idxView; |
|
virtualFaceData.bInvalidFacesRelative = false; |
|
|
|
unsigned processedFaces = 0; |
|
for (FIndex fid : currentVirtualFace) { |
|
// safety check: make sure the face ID is within range
|
if (fid >= facesDatas.size()) { |
|
// DEBUG_EXTRA("Warning: Invalid face ID in virtual face: %u (max: %zu)", |
|
// fid, facesDatas.size()); |
|
continue; |
|
} |
|
|
|
for (const FaceData& fd : facesDatas[fid]) { |
|
if (fd.idxView == idxView && !fd.bInvalidFacesRelative) { |
|
virtualFaceData.quality += fd.quality; |
|
processedFaces++; |
|
break; |
|
} |
|
} |
|
} |
|
|
|
if (processedFaces > 0) { |
|
virtualFaceData.quality /= processedFaces; |
|
currentVirtualFaceDatas.push_back(virtualFaceData); |
|
} |
|
} |
|
|
|
if (!currentVirtualFaceDatas.empty()) { |
|
virtualFacesDatas.push_back(currentVirtualFaceDatas); |
|
virtualFaces.push_back(currentVirtualFace); |
|
} |
|
} |
|
|
|
// remove the processed faces from the remaining set
|
for (FIndex fid : currentVirtualFace) { |
|
const auto pos = remainingFaces.Find(fid); |
|
if (pos != Mesh::FaceIdxArr::NO_INDEX) { |
|
remainingFaces.RemoveAtMove(pos); |
|
} |
|
} |
|
} |
|
|
|
// handle the leftover faces (simplified: one virtual face per face)

for (FIndex fid : remainingFaces) {

// safety check: make sure the face ID is within range
|
if (fid >= faces.size() || fid >= facesDatas.size()) { |
|
// DEBUG_EXTRA("Warning: Invalid remaining face ID: %u (max: %zu)", |
|
// fid, faces.size()); |
|
continue; |
|
} |
|
|
|
const FaceDataArr& faceDatas = facesDatas[fid]; |
|
if (faceDatas.empty()) continue; |
|
|
|
// create a single-face virtual face
|
Mesh::FaceIdxArr singleFace = {fid}; |
|
virtualFaces.push_back(singleFace); |
|
mapFaceToVirtualFace[fid] = virtualFaces.size() - 1; |
|
|
|
FaceDataArr singleFaceDatas; |
|
for (const FaceData& fd : faceDatas) { |
|
if (!fd.bInvalidFacesRelative) { |
|
singleFaceDatas.push_back(fd); |
|
} |
|
} |
|
virtualFacesDatas.push_back(singleFaceDatas); |
|
} |
|
|
|
// safety check: make sure every face maps to a valid virtual face

for (FIndex fid = 0; fid < faces.size(); fid++) {

if (mapFaceToVirtualFace[fid] == NO_ID || mapFaceToVirtualFace[fid] >= virtualFaces.size()) {

// create a default single-face virtual face
|
Mesh::FaceIdxArr defaultVirtualFace = {fid}; |
|
virtualFaces.push_back(defaultVirtualFace); |
|
|
|
// create default data
|
FaceDataArr defaultData; |
|
if (fid < facesDatas.size()) { |
|
for (const FaceData& fd : facesDatas[fid]) { |
|
if (!fd.bInvalidFacesRelative) { |
|
defaultData.push_back(fd); |
|
} |
|
} |
|
} |
|
virtualFacesDatas.push_back(defaultData); |
|
|
|
// update the mapping
|
mapFaceToVirtualFace[fid] = virtualFaces.size() - 1; |
|
|
|
// DEBUG_EXTRA("Fixed mapping for face %u -> virtual face %zu", |
|
// fid, virtualFaces.size() - 1); |
|
} |
|
} |
|
|
|
// final validation of the mapping
|
size_t validMappings = 0; |
|
size_t invalidMappings = 0; |
|
|
|
for (FIndex fid = 0; fid < faces.size(); fid++) { |
|
size_t virtualIdx = mapFaceToVirtualFace[fid]; |
|
|
|
if (virtualIdx >= virtualFaces.size()) { |
|
virtualIdx = 0; // safe fallback value
|
mapFaceToVirtualFace[fid] = 0; |
|
} |
|
|
|
if (virtualIdx < virtualFaces.size()) { |
|
validMappings++; |
|
} else { |
|
invalidMappings++; |
|
// DEBUG_EXTRA("Critical error: Face %u mapped to invalid virtual face %zu", |
|
// fid, virtualIdx); |
|
} |
|
} |
|
|
|
// DEBUG_EXTRA("Created %zu virtual faces with %zu valid mappings and %zu invalid mappings", |
|
// virtualFaces.size(), validMappings, invalidMappings); |
|
|
|
// save the mapping in a member variable for later use
|
// m_mapFaceToVirtualFace = mapFaceToVirtualFace; |
|
} |
|
|
|
void MeshTexture::CreateVirtualFaces5(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras, float thMaxNormalDeviation) const |
|
{ |
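// like CreateVirtualFaces3, but observations flagged bInvalidFacesRelative are skipped when
// accumulating the per-view quality and color of a virtual face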
|
if (meshCurvatures.empty()) { |
|
ComputeFaceCurvatures(); |
|
} |
|
|
|
const float ratioAngleToQuality(0.67f); |
|
const float cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation))); |
|
Mesh::FaceIdxArr remainingFaces(faces.size()); |
|
std::iota(remainingFaces.begin(), remainingFaces.end(), 0); |
|
std::vector<bool> selectedFaces(faces.size(), false); |
|
cQueue<FIndex, FIndex, 0> currentVirtualFaceQueue; |
|
std::unordered_set<FIndex> queuedFaces; |
|
do { |
|
const FIndex startPos = RAND() % remainingFaces.size(); |
|
const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; |
|
|
|
// curvature-dependent normal threshold

const float centerCurvature = meshCurvatures[virtualFaceCenterFaceID];

const float dynamicThreshold = (centerCurvature < 0.2f) ? 15.0f : 8.0f; // curvature < 0.2 counts as a flat region (15 deg), otherwise 8 deg
|
const float dynamicCosTh = COS(FD2R(dynamicThreshold)); |
|
|
|
ASSERT(currentVirtualFaceQueue.IsEmpty()); |
|
const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; |
|
const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID]; |
|
// select the common cameras |
|
Mesh::FaceIdxArr virtualFace; |
|
FaceDataArr virtualFaceDatas; |
|
if (centerFaceDatas.empty()) { |
|
virtualFace.emplace_back(virtualFaceCenterFaceID); |
|
selectedFaces[virtualFaceCenterFaceID] = true; |
|
const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID); |
|
ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); |
|
remainingFaces.RemoveAtMove(posToErase); |
|
} else { |
|
const IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); |
|
currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); |
|
queuedFaces.clear(); |
|
do { |
|
const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); |
|
currentVirtualFaceQueue.PopHead(); |
|
// check for condition to add in current virtual face |
|
// normal angle smaller than thMaxNormalDeviation degrees |
|
const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; |
|
const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); |
|
// if (cosFaceToCenter < cosMaxNormalDeviation) |
|
// continue; |
|
if (cosFaceToCenter < dynamicCosTh) // use the curvature-dependent threshold
|
continue; |
|
// check if current face is seen by all cameras in selectedCams |
|
ASSERT(!selectedCams.empty()); |
|
if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams)) |
|
continue; |
|
|
|
/* |
|
// #ifdef TEXOPT_USE_OPENMP |
|
// #pragma omp critical |
|
// #endif |
|
// std::lock_guard<std::mutex> lock(*scene.mesh.invalidFaces.mtx); |
|
// if (scene.mesh.invalidFaces.data.find(currentFaceId) != scene.mesh.invalidFaces.data.end()) { |
|
// continue; // skip invalid faces
|
// } |
|
|
|
// check that the face is validly seen by all selected cameras
|
if (!IsFaceVisibleAndValid(facesDatas[currentFaceId], selectedCams)) { |
|
continue; |
|
} |
|
//*/ |
|
|
|
// remove it from remaining faces and add it to the virtual face |
|
{ |
|
const auto posToErase = remainingFaces.FindFirst(currentFaceId); |
|
ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); |
|
remainingFaces.RemoveAtMove(posToErase); |
|
selectedFaces[currentFaceId] = true; |
|
virtualFace.push_back(currentFaceId); |
|
} |
|
// add all new neighbors to the queue |
|
const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId]; |
|
for (int i = 0; i < 3; ++i) { |
|
const FIndex fIdx = ffaces[i]; |
|
if (fIdx == NO_ID) |
|
continue; |
|
if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) { |
|
currentVirtualFaceQueue.AddTail(fIdx); |
|
queuedFaces.emplace(fIdx); |
|
} |
|
} |
|
} while (!currentVirtualFaceQueue.IsEmpty()); |
|
// compute virtual face quality and create virtual face |
|
for (IIndex idxView: selectedCams) { |
|
FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); |
|
virtualFaceData.quality = 0; |
|
virtualFaceData.idxView = idxView; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color = Point3f::ZERO; |
|
#endif |
|
unsigned processedFaces(0); |
|
bool bInvalidFacesRelative = false; |
|
int invalidCount = 0; |
|
for (FIndex fid : virtualFace) { |
|
const FaceDataArr& faceDatas = facesDatas[fid]; |
|
for (const FaceData& faceData: faceDatas) {
|
/* |
|
// if (faceData.idxView == idxView) { |
|
if (faceData.idxView == idxView && !faceData.bInvalidFacesRelative) { |
|
virtualFaceData.quality += faceData.quality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color += faceData.color; |
|
#endif |
|
++processedFaces; |
|
if (faceData.bInvalidFacesRelative) |
|
++invalidCount; |
|
// break; |
|
} |
|
//*/ |
|
/* |
|
int nViewCount = 0; |
|
if (faceData.idxView == idxView) { |
|
for (const FaceData& fd : faceDatas) { |
|
if (fd.idxView != idxView) { |
|
++nViewCount; |
|
} |
|
} |
|
if ((nViewCount<=10) || !faceData.bInvalidFacesRelative) { |
|
virtualFaceData.quality += faceData.quality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color += faceData.color; |
|
#endif |
|
++processedFaces; |
|
// break; |
|
} |
|
} |
|
//*/ |
|
//* |
|
// accumulate only the observations of this view that are not flagged invalid

if (faceData.idxView == idxView && !faceData.bInvalidFacesRelative) {

virtualFaceData.quality += faceData.quality;

#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA

virtualFaceData.color += faceData.color;

#endif

++processedFaces;

}
|
//*/ |
|
} |
|
} |
|
ASSERT(processedFaces > 0); |
|
virtualFaceData.quality /= processedFaces; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color /= processedFaces; |
|
#endif |
|
virtualFaceData.bInvalidFacesRelative = (invalidCount > 1); |
|
// virtualFaceData.bInvalidFacesRelative = (invalidCount > processedFaces * 2 / 3); |
|
} |
|
ASSERT(!virtualFaceDatas.empty()); |
|
} |
|
virtualFacesDatas.emplace_back(std::move(virtualFaceDatas)); |
|
virtualFaces.emplace_back(std::move(virtualFace)); |
|
} while (!remainingFaces.empty()); |
|
} |
|
|
|
bool MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, std::vector<bool>& isVirtualFace, unsigned minCommonCameras, float thMaxNormalDeviation) const |
|
{ |
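// extended variant: precomputes a per-face average color, filters the selected views by the
// angle between the camera direction and the seed-face normal (and by the scene edge-face
// queries), records in isVirtualFace which seeds form proper virtual faces, and falls back to
// median/MAD statistics when the seed face has views flagged invalid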
|
if (meshCurvatures.empty()) { |
|
ComputeFaceCurvatures(); |
|
} |
|
|
|
float thMaxColorDeviation = 130.0f; |
|
|
|
const float ratioAngleToQuality(0.67f); |
|
const float cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation))); |
|
Mesh::FaceIdxArr remainingFaces(faces.size()); |
|
std::iota(remainingFaces.begin(), remainingFaces.end(), 0); |
|
std::vector<bool> selectedFaces(faces.size(), false); |
|
cQueue<FIndex, FIndex, 0> currentVirtualFaceQueue; |
|
std::unordered_set<FIndex> queuedFaces; |
|
|
|
// Precompute average color for each face |
|
Colors faceColors; // per-face average color

faceColors.reserve(faces.size()); // preallocate

for (size_t i = 0; i < faces.size(); ++i) {

faceColors.push_back(Color::ZERO); // initialize every entry to zero
|
} |
|
for (FIndex idxFace = 0; idxFace < faces.size(); ++idxFace) { |
|
const FaceDataArr& faceDatas = facesDatas[idxFace]; |
|
if (faceDatas.empty()) continue; |
|
Color sumColor = Color::ZERO; |
|
for (const FaceData& fd : faceDatas) { |
|
sumColor += fd.color; |
|
} |
|
faceColors[idxFace] = sumColor / faceDatas.size(); |
|
} |
|
|
|
do { |
|
const FIndex startPos = RAND() % remainingFaces.size(); |
|
const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; |
|
|
|
// curvature-dependent normal threshold

const float centerCurvature = meshCurvatures[virtualFaceCenterFaceID];

const float dynamicThreshold = (centerCurvature < 0.2f) ? 15.0f : 8.0f; // curvature < 0.2 counts as a flat region (15 deg), otherwise 8 deg
|
const float dynamicCosTh = COS(FD2R(dynamicThreshold)); |
|
|
|
ASSERT(currentVirtualFaceQueue.IsEmpty()); |
|
const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; |
|
const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID]; |
|
|
|
// check whether the center face has any view flagged as invalid
|
bool bHasInvalidView = false; |
|
int nInvalidViewCount = 0; |
|
int nTotalViewCount = 0; |
|
for (const FaceData& faceData : centerFaceDatas) { |
|
if (faceData.bInvalidFacesRelative) { |
|
bHasInvalidView = true; |
|
++nInvalidViewCount; |
|
// break; |
|
} |
|
++nTotalViewCount; |
|
} |
|
|
|
std::vector<std::pair<float, Color>> sortedViews; |
|
std::vector<std::pair<float, Color>> sortedLuminViews; |
|
std::vector<std::pair<float, Color>> validViews; |
|
sortedViews.reserve(centerFaceDatas.size()); |
|
for (const FaceData& fd : centerFaceDatas) { |
|
|
|
if (fd.bInvalidFacesRelative) |
|
{ |
|
// invalidView = fd.idxView; |
|
// invalidQuality = fd.quality; |
|
sortedViews.emplace_back(fd.quality, fd.color); |
|
sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color); |
|
} |
|
else |
|
{ |
|
sortedViews.emplace_back(fd.quality, fd.color); |
|
sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color); |
|
validViews.emplace_back(fd.quality, fd.color); |
|
} |
|
} |
|
std::sort(sortedViews.begin(), sortedViews.end(), |
|
[](const auto& a, const auto& b) { return a.first > b.first; }); |
|
std::sort(validViews.begin(), validViews.end(), |
|
[](const auto& a, const auto& b) { return a.first > b.first; }); |
|
|
|
int nSize = sortedViews.size(); |
|
// int nSize = (sortedViews.size()>1) ? 1 : sortedViews.size(); |
|
// initial averages over the selected observations
|
float totalQuality = 0.0f; |
|
Color totalColor(0,0,0); |
|
for (int n = 0; n < nSize; ++n) { |
|
totalQuality += sortedViews[n].first; |
|
totalColor += sortedViews[n].second; |
|
} |
|
const float avgQuality = totalQuality / MAXF(nSize, 1); // guard against an empty observation list

const Color avgColor = totalColor / MAXF(nSize, 1);
|
|
|
float totalLuminance = MeshTexture::GetLuminance(totalColor);

float avgLuminance = totalLuminance / MAXF(nSize, 1);

// sort the views by how close their luminance is to the average luminance

std::sort(sortedLuminViews.begin(), sortedLuminViews.end(),

[avgLuminance](const auto& a, const auto& b) {

float luminDistA = std::abs(avgLuminance - a.first);

float luminDistB = std::abs(avgLuminance - b.first);

return luminDistA < luminDistB; });
|
|
|
// select the common cameras |
|
Mesh::FaceIdxArr virtualFace; |
|
FaceDataArr virtualFaceDatas; |
|
if (centerFaceDatas.empty()) { |
|
virtualFace.emplace_back(virtualFaceCenterFaceID); |
|
selectedFaces[virtualFaceCenterFaceID] = true; |
|
const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID); |
|
ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); |
|
remainingFaces.RemoveAtMove(posToErase); |
|
} else { |
|
IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); |
|
|
|
//* |
|
// center-face normal (same value as normalCenter declared above)

const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID];

// filter selectedCams per view: drop views for which this face is marked delete-edge; for views where it is marked edge, require the camera direction within 45 degrees of the face normal

IIndexArr filteredCams; // views that pass the filter
|
for (IIndex idxView : selectedCams) { |
|
const Image& imageData = images[idxView]; |
|
// camera viewing direction in world coordinates

const RMatrix& R = imageData.camera.R; // world-to-camera rotation

// forward vector in the camera's local frame (0,0,-1)

Point3f localForward(0.0f, 0.0f, -1.0f);

// cameraForward = R * localForward (explicit matrix-vector product)
|
Point3f cameraForward; |
|
cameraForward.x = R(0,0) * localForward.x + R(0,1) * localForward.y + R(0,2) * localForward.z; |
|
cameraForward.y = R(1,0) * localForward.x + R(1,1) * localForward.y + R(1,2) * localForward.z; |
|
cameraForward.z = R(2,0) * localForward.x + R(2,1) * localForward.y + R(2,2) * localForward.z; |
|
|
|
// normalize cameraForward manually
|
float norm = std::sqrt(cameraForward.x * cameraForward.x + |
|
cameraForward.y * cameraForward.y + |
|
cameraForward.z * cameraForward.z); |
|
if (norm > 0.0f) { |
|
cameraForward.x /= norm; |
|
cameraForward.y /= norm; |
|
cameraForward.z /= norm; |
|
} else { |
|
// degenerate rotation: fall back to the default forward direction
|
cameraForward = Point3f(0, 0, -1); |
|
} |
|
|
|
// cosine of the angle between the camera forward direction and the face normal

Point3f normalPoint(normalCenter.x, normalCenter.y, normalCenter.z); // explicit conversion from Normal

float cosAngle = cameraForward.dot(normalPoint);

float angleDeg = std::acos(cosAngle) * 180.0f / M_PI; // radians to degrees
|
|
|
std::string strPath = imageData.name; |
|
size_t lastSlash = strPath.find_last_of("/\\"); |
|
if (lastSlash == std::string::npos) lastSlash = 0; // no separator: start from the beginning

else lastSlash++; // skip the separator

// position of the extension separator '.'

size_t lastDot = strPath.find_last_of('.');

if (lastDot == std::string::npos) lastDot = strPath.size(); // no extension: take everything up to the end

// file name without path and extension
|
std::string strName = strPath.substr(lastSlash, lastDot - lastSlash); |
|
|
|
// printf("CreateVirtualFace %s, %d\n", strName.c_str(), virtualFaceCenterFaceID); |
|
|
|
if (!scene.is_face_delete_edge(strName, virtualFaceCenterFaceID)) |
|
{ |
|
if (scene.is_face_edge(strName, virtualFaceCenterFaceID)) |
|
{ |
|
// printf("CreateVirtualFace %s, %d, %f\n", strName.c_str(), virtualFaceCenterFaceID, angleLimit); |
|
|
|
if (angleDeg <= 45.0f) |
|
{ |
|
filteredCams.push_back(idxView); |
|
} |
|
} |
|
else |
|
{ |
|
filteredCams.push_back(idxView); |
|
} |
|
} |
|
} |
|
|
|
// note: selectedCams must be non-const so it can be reassigned below

if (filteredCams.empty()) {

// every view was filtered out: leave selectedCams empty and mark this seed as not forming a proper virtual face
|
// DEBUG_EXTRA("Warning: All views filtered for virtual face due to angle condition."); |
|
|
|
// selectedCams = SelectBestView(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); |
|
selectedCams = filteredCams; |
|
isVirtualFace[virtualFaceCenterFaceID] = false; |
|
|
|
} else { |
|
selectedCams = filteredCams; |
|
isVirtualFace[virtualFaceCenterFaceID] = true; |
|
} |
|
//*/ |
|
|
|
currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); |
|
queuedFaces.clear(); |
|
do { |
|
const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); |
|
currentVirtualFaceQueue.PopHead(); |
|
// check for condition to add in current virtual face |
|
// normal angle smaller than thMaxNormalDeviation degrees |
|
const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; |
|
const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); |
|
// if (cosFaceToCenter < cosMaxNormalDeviation) |
|
// continue; |
|
if (cosFaceToCenter < dynamicCosTh) // use the curvature-dependent threshold
|
continue; |
|
// check if current face is seen by all cameras in selectedCams |
|
ASSERT(!selectedCams.empty()); |
|
if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams)) |
|
continue; |
|
|
|
// Check color similarity |
|
const Color& centerColor = faceColors[virtualFaceCenterFaceID]; |
|
const Color& currentColor = faceColors[currentFaceId]; |
|
// if (cv::norm(centerColor) > 1e-5 && cv::norm(currentColor) > 1e-5) |
|
{ |
|
float colorDistance = cv::norm(centerColor - currentColor); |
|
// printf("1colorDistance=%f\n", colorDistance); |
|
if (colorDistance > thMaxColorDeviation) { |
|
// printf("2colorDistance=%f\n", colorDistance); |
|
// continue; // Skip if color difference is too large |
|
} |
|
} |
|
|
|
/* |
|
// #ifdef TEXOPT_USE_OPENMP |
|
// #pragma omp critical |
|
// #endif |
|
// std::lock_guard<std::mutex> lock(*scene.mesh.invalidFaces.mtx); |
|
// if (scene.mesh.invalidFaces.data.find(currentFaceId) != scene.mesh.invalidFaces.data.end()) { |
|
// continue; // skip invalid faces
|
// } |
|
|
|
// check that the face is validly seen by all selected cameras
|
if (!IsFaceVisibleAndValid(facesDatas[currentFaceId], selectedCams)) { |
|
continue; |
|
} |
|
//*/ |
|
|
|
// remove it from remaining faces and add it to the virtual face |
|
{ |
|
const auto posToErase = remainingFaces.FindFirst(currentFaceId); |
|
ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); |
|
remainingFaces.RemoveAtMove(posToErase); |
|
selectedFaces[currentFaceId] = true; |
|
virtualFace.push_back(currentFaceId); |
|
} |
|
// add all new neighbors to the queue |
|
const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId]; |
|
for (int i = 0; i < 3; ++i) { |
|
const FIndex fIdx = ffaces[i]; |
|
if (fIdx == NO_ID) |
|
continue; |
|
if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) { |
|
currentVirtualFaceQueue.AddTail(fIdx); |
|
queuedFaces.emplace(fIdx); |
|
} |
|
} |
|
} while (!currentVirtualFaceQueue.IsEmpty()); |
|
|
|
/* |
|
if (selectedCams.empty()) { |
|
const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color; |
|
const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality; |
|
|
|
FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); |
|
virtualFaceData.color = medianColor; |
|
virtualFaceData.quality = medianQuality; |
|
|
|
} |
|
*/ |
|
|
|
// compute virtual face quality and create virtual face |
|
for (IIndex idxView: selectedCams) { |
|
FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); |
|
virtualFaceData.quality = 0; |
|
virtualFaceData.idxView = idxView; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color = Point3f::ZERO; |
|
#endif |
|
int invalidQuality = 0; |
|
Color invalidColor = Point3f::ZERO; |
|
unsigned processedFaces(0); |
|
bool bInvalidFacesRelative = false; |
|
int invalidCount = 0; |
|
for (FIndex fid : virtualFace) { |
|
const FaceDataArr& faceDatas = facesDatas[fid]; |
|
for (FaceData& faceData: faceDatas) { |
|
/* |
|
if (faceData.idxView == idxView) { |
|
// if (faceData.idxView == idxView && !faceData.bInvalidFacesRelative) { |
|
virtualFaceData.quality += faceData.quality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color += faceData.color; |
|
#endif |
|
++processedFaces; |
|
if (faceData.bInvalidFacesRelative) |
|
++invalidCount; |
|
break; |
|
} |
|
//*/ |
|
/* |
|
int nViewCount = 0; |
|
if (faceData.idxView == idxView) { |
|
for (const FaceData& fd : faceDatas) { |
|
if (fd.idxView != idxView) { |
|
++nViewCount; |
|
} |
|
} |
|
if ((nViewCount<=10) || !faceData.bInvalidFacesRelative) { |
|
virtualFaceData.quality += faceData.quality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color += faceData.color; |
|
#endif |
|
++processedFaces; |
|
// break; |
|
} |
|
} |
|
//*/ |
|
//* |
|
int nViewCount = 0; |
|
if (faceData.idxView == idxView) |
|
{ |
|
for (const FaceData& fd : faceDatas) |
|
{ |
|
						if (fd.bInvalidFacesRelative)
|
{ |
|
++nViewCount; |
|
} |
|
} |
|
// if (faceData.bInvalidFacesRelative) |
|
if (bHasInvalidView) |
|
{ |
|
// invalidQuality += faceData.quality; |
|
// #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
// invalidColor += faceData.color; |
|
// #endif |
|
|
|
++processedFaces; |
|
} |
|
else |
|
{ |
|
// virtualFaceData.quality += faceData.quality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
// virtualFaceData.color += faceData.color; |
|
#endif |
|
++processedFaces; |
|
// break; |
|
} |
|
} |
|
//*/ |
|
} |
|
} |
|
|
|
float maxLuminance = 120.0f; |
|
float minLuminance = 90.0f; |
|
int validViewsSize = validViews.size(); |
|
// bHasInvalidView = true; |
|
if (bHasInvalidView) |
|
{ |
|
				// use robust statistics to estimate the central color and luminance of the views
				const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color;
				const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality;
				const float medianLuminance = ComputeMedianLuminance(sortedViews);

				// use the median absolute deviation (MAD) of color and luminance as the deviation measure
				const float colorMAD = ComputeColorMAD(sortedViews, medianColor);
				const float luminanceMAD = ComputeLuminanceMAD(sortedViews, medianLuminance);

				// derive deviation thresholds from the MAD
				// (about 3*MAD is the usual statistical outlier cut-off; a much tighter 0.01*MAD is used here)
				const float maxColorDeviation = 0.01f * colorMAD;
				const float maxLuminanceDeviation = 0.01f * luminanceMAD;
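				// For reference, the conventional robust cut-off mentioned in the original comment
				// would look roughly like this (sketch only, not enabled; assumes the MAD helpers
				// return plain scalar deviation estimates):
				//   const float maxColorDeviation = 3.f * colorMAD;
				//   const float maxLuminanceDeviation = 3.f * luminanceMAD;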
|
|
|
std::vector<int> validIndices; |
|
for (int n = 0; n < sortedViews.size(); ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
const float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
|
|
const float colorDistance = cv::norm(viewColor - medianColor); |
|
const float luminanceDistance = std::abs(viewLuminance - medianLuminance); |
|
|
|
if (colorDistance <= maxColorDeviation && |
|
luminanceDistance <= maxLuminanceDeviation) |
|
{ |
|
validIndices.push_back(n); |
|
} |
|
else |
|
{ |
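					// NOTE: the virtual-face queue was already drained by the region-growing loop above,
					// so GetHead() below no longer refers to a queued face; this branch appears to be
					// leftover experimental code and its angle/color checks act on stale data.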
|
const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); |
|
const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; |
|
const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); |
|
|
|
bool bColorSimilarity = true; |
|
// Check color similarity |
|
const Color& centerColor = faceColors[virtualFaceCenterFaceID]; |
|
const Color& currentColor = faceColors[currentFaceId]; |
|
|
|
float colorDistance = cv::norm(centerColor - currentColor); |
|
// printf("1colorDistance=%f\n", colorDistance); |
|
if (colorDistance > thMaxColorDeviation) { |
|
// printf("2colorDistance=%f\n", colorDistance); |
|
bColorSimilarity = false; |
|
} |
|
|
|
// if ((cosFaceToCenter<dynamicCosTh) || !IsFaceVisible(facesDatas[currentFaceId], selectedCams)) |
|
if (cosFaceToCenter<dynamicCosTh) |
|
{ |
|
if (nInvalidViewCount<=2) |
|
validIndices.push_back(n); |
|
else |
|
{ |
|
// if ((colorDistance <= 350.0f)) |
|
validIndices.push_back(n); |
|
} |
|
} |
|
else |
|
{ |
|
if (nInvalidViewCount<=2) |
|
validIndices.push_back(n); |
|
else |
|
{ |
|
// if (bColorSimilarity) |
|
validIndices.push_back(n); |
|
} |
|
} |
|
|
|
} |
|
} |
|
|
|
if (validIndices.empty()) { |
|
for (int n = 0; n < sortedViews.size(); ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
const float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
|
|
const float colorDistance = cv::norm(viewColor - medianColor); |
|
const float luminanceDistance = std::abs(viewLuminance - medianLuminance); |
|
|
|
if (colorDistance <= maxColorDeviation) |
|
{ |
|
// validIndices.push_back(n); |
|
} |
|
} |
|
} |
|
|
|
if (validIndices.empty()) { |
|
for (int n = 0; n < sortedViews.size(); ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
const float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
|
|
const float colorDistance = cv::norm(viewColor - medianColor); |
|
const float luminanceDistance = std::abs(viewLuminance - medianLuminance); |
|
|
|
if (luminanceDistance <= maxLuminanceDeviation) |
|
{ |
|
// validIndices.push_back(n); |
|
} |
|
} |
|
} |
|
|
|
/* |
|
if (validIndices.empty()) { |
|
for (int n = 0; n < sortedViews.size(); ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
const float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
|
|
const float colorDistance = cv::norm(viewColor - medianColor); |
|
const float luminanceDistance = std::abs(viewLuminance - medianLuminance); |
|
|
|
if (luminanceDistance <= maxLuminanceDeviation) |
|
{ |
|
validIndices.push_back(n); |
|
} |
|
} |
|
} |
|
|
|
if (validIndices.empty()) { |
|
for (int n = 0; n < sortedViews.size(); ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
const float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
|
|
const float colorDistance = cv::norm(viewColor - medianColor); |
|
const float luminanceDistance = std::abs(viewLuminance - medianLuminance); |
|
|
|
if (colorDistance <= maxColorDeviation) |
|
{ |
|
validIndices.push_back(n); |
|
} |
|
} |
|
} |
|
//*/ |
|
|
|
if (validViewsSize<=0&&false) |
|
{ |
|
//* |
|
// int nSize = sortedViews.size(); // (sortedViews.size() > 3) ? 3 : sortedViews.size(); |
|
|
|
				// // compute the initial averages
|
// float totalQuality = 0.0f; |
|
// Color totalColor(0,0,0); |
|
// for (int n = 0; n < nSize; ++n) { |
|
// totalQuality += sortedViews[n].first; |
|
// totalColor += sortedViews[n].second; |
|
// } |
|
// const float avgQuality = totalQuality / nSize; |
|
// const Color avgColor = totalColor / nSize; |
|
|
|
				// filter out views that deviate too much
				// std::vector<int> validIndices;
				float maxColorDeviation = 0.01f; // color deviation threshold
|
float maxLuminanceDeviation = 0.01f; |
|
|
|
for (int n = 0; n < nSize; ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
float colorDistance = cv::norm(avgColor - viewColor); |
|
// printf("colorDistance=%f\n", colorDistance); |
|
|
|
float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
float luminanceDistance = cv::norm(avgLuminance - viewLuminance); |
|
// printf("viewLuminance=%f\n", viewLuminance); |
|
|
|
// if ((colorDistance<=maxColorDeviation)&& |
|
// (viewLuminance<=maxLuminance)&& |
|
// (viewLuminance>=minLuminance)){ |
|
if ((colorDistance <= maxColorDeviation) && |
|
(luminanceDistance <= maxLuminanceDeviation)) { |
|
// validIndices.push_back(n); |
|
} |
|
} |
|
|
|
//* |
|
if (validIndices.empty()) { |
|
for (int n = 0; n < nSize; ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
float luminanceDistance = cv::norm(avgLuminance - viewLuminance); |
|
|
|
if (luminanceDistance <= maxLuminanceDeviation){ |
|
// validIndices.push_back(n); |
|
} |
|
} |
|
} |
|
if (validIndices.empty()) { |
|
for (int n = 0; n < nSize; ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
float colorDistance = cv::norm(avgColor - viewColor); |
|
|
|
if (colorDistance<=maxColorDeviation){ |
|
// validIndices.push_back(n); |
|
} |
|
} |
|
} |
|
//*/ |
|
|
|
/* |
|
float maxColorDeviation2 = 0.05f; |
|
if (validIndices.empty()) { |
|
for (int n = 0; n < nSize; ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
float colorDistance = cv::norm(avgColor - viewColor); |
|
|
|
if (colorDistance <= maxColorDeviation2) { |
|
validIndices.push_back(n); |
|
} |
|
} |
|
} |
|
//*/ |
|
|
|
/* |
|
float totalLuminance = MeshTexture::GetLuminance(totalColor); |
|
float avgLuminance = totalLuminance / nSize; |
|
for (int n = 0; n < nSize; ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
float luminanceDistance = cv::norm(avgLuminance - viewLuminance); |
|
|
|
if (luminanceDistance <= maxLuminanceDeviation) { |
|
validIndices.push_back(n); |
|
} |
|
} |
|
//*/ |
|
|
|
				// if all views were rejected, fall back to the robust median values
|
if (validIndices.empty()) { |
|
// virtualFaceData.quality = avgQuality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
// virtualFaceData.color = avgColor; |
|
#endif |
|
// virtualFaceData.quality = avgQuality; |
|
// virtualFaceData.color = sortedLuminViews[0].second; |
|
virtualFaceData.quality = medianQuality; |
|
virtualFaceData.color = medianColor; |
|
} |
|
else { |
|
					// recompute the average from the filtered views, weighting colors by closeness to the average color
|
float totalQuality2 = 0.0f; |
|
Color totalColor2 = Color(0,0,0); |
|
for (int idx : validIndices) { |
|
|
|
const Color& viewColor = sortedViews[idx].second; |
|
float colorDistance = cv::norm(avgColor - viewColor); |
|
float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation); |
|
|
|
totalQuality2 += sortedViews[idx].first; |
|
totalColor2 += sortedViews[idx].second * weight; |
|
} |
|
virtualFaceData.quality = totalQuality2 / validIndices.size(); |
|
virtualFaceData.color = totalColor2 / validIndices.size(); |
|
} |
|
//*/ |
|
} |
|
else if (validViewsSize>0&&validViewsSize<=2&&false) |
|
{ |
|
/* |
|
virtualFaceData.quality = 0; |
|
virtualFaceData.color = Point3f::ZERO; |
|
// int nSize = (validViews.size()>1) ? 1 : validViews.size(); |
|
int nSize = validViews.size(); |
|
for (int n=0; n<nSize; ++n) |
|
{ |
|
virtualFaceData.quality += validViews[n].first; |
|
virtualFaceData.color += validViews[n].second; |
|
} |
|
virtualFaceData.quality /= nSize; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color /= nSize; |
|
#endif |
|
*/ |
|
//* |
|
int nSize = validViews.size(); // (validViews.size() > 3) ? 3 : validViews.size(); |
|
|
|
				// compute the initial average quality and color over the valid views
				float totalQuality2 = 0.0f;
				Color totalColor2(0,0,0);
				for (int n = 0; n < nSize; ++n) {
					totalQuality2 += validViews[n].first;
					totalColor2 += validViews[n].second;
				}
				const float avgQuality2 = totalQuality2 / nSize;
				const Color avgColor2 = totalColor2 / nSize;

				// filter out views that deviate too much
				// std::vector<int> validIndices;
				float maxColorDeviation = 0.01f; // color deviation threshold
|
|
|
for (int n = 0; n < nSize; ++n) { |
|
const Color& viewColor = validViews[n].second; |
|
float colorDistance = cv::norm(avgColor2 - viewColor); |
|
// printf("colorDistance=%f\n", colorDistance); |
|
float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
if ((colorDistance<=maxColorDeviation)&& |
|
(viewLuminance<=120.0f)){ |
|
// if (colorDistance <= maxColorDeviation) { |
|
// validIndices.push_back(n); |
|
} |
|
} |
|
|
|
/* |
|
// float totalLuminance = MeshTexture::GetLuminance(totalColor); |
|
// float avgLuminance = totalLuminance / nSize; |
|
float maxLuminanceDeviation = 0.01f; |
|
for (int n = 0; n < nSize; ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
float luminanceDistance = cv::norm(avgLuminance - viewLuminance); |
|
// printf("luminanceDistance=%f\n", luminanceDistance); |
|
if (luminanceDistance <= maxLuminanceDeviation) { |
|
// validIndices.push_back(n); |
|
} |
|
} |
|
//*/ |
|
|
|
				// if all views were rejected, fall back to the median quality and color
|
if (validIndices.empty()) { |
|
// virtualFaceData.quality = avgQuality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
// virtualFaceData.color = avgColor; |
|
#endif |
|
virtualFaceData.quality = medianQuality; |
|
virtualFaceData.color = medianColor; |
|
|
|
// virtualFaceData.color = sortedLuminViews[0].second; |
|
/* |
|
for (int n = 0; n < nSize; ++n) { |
|
float lumin = sortedLuminViews[n].first; |
|
|
|
if (lumin>=minLuminance&&lumin<=maxLuminance) |
|
{ |
|
// virtualFaceData.quality = avgQuality; |
|
// virtualFaceData.color = sortedLuminViews[0].second; |
|
break; |
|
} |
|
} |
|
//*/ |
|
} |
|
else { |
|
					// recompute the average from the filtered views, weighting colors by closeness to the average color
|
float totalQuality2 = 0.0f; |
|
Color totalColor2 = Color(0,0,0); |
|
for (int idx : validIndices) { |
|
const Color& viewColor = sortedViews[idx].second; |
|
float colorDistance = cv::norm(avgColor - viewColor); |
|
float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation); |
|
|
|
totalQuality2 += validViews[idx].first; |
|
totalColor2 += validViews[idx].second * weight; |
|
} |
|
virtualFaceData.quality = totalQuality2 / validIndices.size(); |
|
virtualFaceData.color = totalColor2 / validIndices.size(); |
|
} |
|
//*/ |
|
} |
|
else |
|
{ |
|
//* |
|
ASSERT(processedFaces > 0); |
|
// virtualFaceData.quality /= processedFaces; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
// virtualFaceData.color /= processedFaces; |
|
#endif |
|
|
|
virtualFaceData.quality = 0; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color = Point3f::ZERO; |
|
#endif |
|
//*/ |
|
/* |
|
				// if all views were rejected, fall back to the accumulated per-face average
|
if (validIndices.empty() || validViews.size() <= 0) { |
|
// virtualFaceData.quality = avgQuality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
// virtualFaceData.color = avgColor; |
|
#endif |
|
// virtualFaceData.quality = medianQuality; |
|
// virtualFaceData.color = medianColor; |
|
virtualFaceData.quality /= processedFaces; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color /= processedFaces; |
|
#endif |
|
} |
|
else { |
|
				// recompute the average from the filtered views, weighting colors by closeness to the average color
|
float totalQuality2 = 0.0f; |
|
Color totalColor2 = Color(0,0,0); |
|
for (int idx : validIndices) { |
|
const Color& viewColor = sortedViews[idx].second; |
|
float colorDistance = cv::norm(avgColor - viewColor); |
|
float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation); |
|
|
|
totalQuality2 += validViews[idx].first; |
|
totalColor2 += validViews[idx].second * weight; |
|
} |
|
virtualFaceData.quality = totalQuality2 / validIndices.size(); |
|
virtualFaceData.color = totalColor2 / validIndices.size(); |
|
} |
|
//*/ |
|
} |
|
} |
|
else |
|
{ |
|
				// use robust statistics to estimate the central color and luminance of the views
				const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color;
				const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality;
				const float medianLuminance = ComputeMedianLuminance(sortedViews);

				// use the median absolute deviation (MAD) of color and luminance as the deviation measure
				const float colorMAD = ComputeColorMAD(sortedViews, medianColor);
				const float luminanceMAD = ComputeLuminanceMAD(sortedViews, medianLuminance);

				// derive deviation thresholds from the MAD
				// (about 3*MAD is the usual statistical outlier cut-off; a much tighter 0.01*MAD is used here)
				const float maxColorDeviation = 0.01f * colorMAD;
				const float maxLuminanceDeviation = 0.01f * luminanceMAD;
|
|
|
std::vector<int> validIndices; |
|
for (int n = 0; n < sortedViews.size(); ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
const float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
|
|
const float colorDistance = cv::norm(viewColor - medianColor); |
|
const float luminanceDistance = std::abs(viewLuminance - medianLuminance); |
|
|
|
// if (colorDistance <= maxColorDeviation && |
|
// luminanceDistance <= maxLuminanceDeviation) |
|
{ |
|
validIndices.push_back(n); |
|
} |
|
} |
|
|
|
if (validIndices.empty()) { |
|
|
|
virtualFaceData.quality = medianQuality; |
|
virtualFaceData.color = medianColor; |
|
} |
|
else { |
|
					// recompute the average quality and color from the filtered views
|
float totalQuality2 = 0.0f; |
|
Color totalColor2 = Color(0,0,0); |
|
for (int idx : validIndices) { |
|
totalQuality2 += validViews[idx].first; |
|
totalColor2 += validViews[idx].second; |
|
} |
|
virtualFaceData.quality = totalQuality2 / validIndices.size(); |
|
virtualFaceData.color = totalColor2 / validIndices.size(); |
|
} |
|
} |
|
|
|
// virtualFaceData.bInvalidFacesRelative = (invalidCount > 1); |
|
// virtualFaceData.bInvalidFacesRelative = (invalidCount > processedFaces * 2 / 3); |
|
} |
|
ASSERT(!virtualFaceDatas.empty()); |
|
} |
|
virtualFacesDatas.emplace_back(std::move(virtualFaceDatas)); |
|
virtualFaces.emplace_back(std::move(virtualFace)); |
|
} while (!remainingFaces.empty()); |
|
|
|
return true; |
|
} |
|
|
|
bool MeshTexture::CreateVirtualFaces7(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, std::vector<bool>& isVirtualFace, unsigned minCommonCameras, float thMaxNormalDeviation) const |
|
{ |
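	// Region-grow "virtual faces" (patches of adjacent mesh faces):
	//  - pick a random remaining face as seed and derive a curvature-dependent normal threshold;
	//  - select the best common cameras for the seed and grow over neighbors that stay within the
	//    normal threshold and are visible in all selected cameras;
	//  - for each selected camera, aggregate one per-patch quality/color sample, filtering the view
	//    samples with median/MAD statistics when the seed face has views flagged as invalid.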
|
if (meshCurvatures.empty()) { |
|
ComputeFaceCurvatures(); |
|
} |
|
|
|
float thMaxColorDeviation = 130.0f; |
|
|
|
const float ratioAngleToQuality(0.67f); |
|
const float cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation))); |
|
Mesh::FaceIdxArr remainingFaces(faces.size()); |
|
std::iota(remainingFaces.begin(), remainingFaces.end(), 0); |
|
std::vector<bool> selectedFaces(faces.size(), false); |
|
cQueue<FIndex, FIndex, 0> currentVirtualFaceQueue; |
|
std::unordered_set<FIndex> queuedFaces; |
|
|
|
// Precompute average color for each face |
|
	Colors faceColors; // start with an empty list
	faceColors.reserve(faces.size()); // pre-allocate (assuming cList provides reserve and performance matters)
	for (size_t i = 0; i < faces.size(); ++i) {
		faceColors.push_back(Color::ZERO); // add one zero-initialized entry per face
|
} |
|
for (FIndex idxFace = 0; idxFace < faces.size(); ++idxFace) { |
|
const FaceDataArr& faceDatas = facesDatas[idxFace]; |
|
if (faceDatas.empty()) continue; |
|
Color sumColor = Color::ZERO; |
|
for (const FaceData& fd : faceDatas) { |
|
sumColor += fd.color; |
|
} |
|
faceColors[idxFace] = sumColor / faceDatas.size(); |
|
} |
|
|
|
do { |
|
const FIndex startPos = RAND() % remainingFaces.size(); |
|
const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; |
|
|
|
		// dynamic normal-deviation threshold:
		// flat regions (curvature < 0.2) tolerate 15 degrees, curved regions only 8 degrees
		const float centerCurvature = meshCurvatures[virtualFaceCenterFaceID];
		const float dynamicThreshold = (centerCurvature < 0.2f) ? 15.0f : 8.0f;
|
const float dynamicCosTh = COS(FD2R(dynamicThreshold)); |
|
|
|
ASSERT(currentVirtualFaceQueue.IsEmpty()); |
|
const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; |
|
const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID]; |
|
|
|
		// check whether the center face has any view flagged as invalid
|
bool bHasInvalidView = false; |
|
int nInvalidViewCount = 0; |
|
int nTotalViewCount = 0; |
|
for (const FaceData& faceData : centerFaceDatas) { |
|
if (faceData.bInvalidFacesRelative) { |
|
bHasInvalidView = true; |
|
++nInvalidViewCount; |
|
// break; |
|
} |
|
++nTotalViewCount; |
|
} |
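		// gather per-view (quality, color) samples of the center face:
		//  - sortedViews: all views, later sorted by quality (descending)
		//  - validViews: only the views not flagged as invalid
		//  - sortedLuminViews: (luminance, color) pairs, later sorted by closeness to the average luminance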
|
|
|
std::vector<std::pair<float, Color>> sortedViews; |
|
std::vector<std::pair<float, Color>> sortedLuminViews; |
|
std::vector<std::pair<float, Color>> validViews; |
|
sortedViews.reserve(centerFaceDatas.size()); |
|
for (const FaceData& fd : centerFaceDatas) { |
|
|
|
if (fd.bInvalidFacesRelative) |
|
{ |
|
// invalidView = fd.idxView; |
|
// invalidQuality = fd.quality; |
|
sortedViews.emplace_back(fd.quality, fd.color); |
|
sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color); |
|
} |
|
else |
|
{ |
|
sortedViews.emplace_back(fd.quality, fd.color); |
|
sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color); |
|
validViews.emplace_back(fd.quality, fd.color); |
|
} |
|
} |
|
std::sort(sortedViews.begin(), sortedViews.end(), |
|
[](const auto& a, const auto& b) { return a.first > b.first; }); |
|
std::sort(validViews.begin(), validViews.end(), |
|
[](const auto& a, const auto& b) { return a.first > b.first; }); |
|
|
|
int nSize = sortedViews.size(); |
|
// int nSize = (sortedViews.size()>1) ? 1 : sortedViews.size(); |
|
		// compute the initial average quality and color over all views
|
float totalQuality = 0.0f; |
|
Color totalColor(0,0,0); |
|
for (int n = 0; n < nSize; ++n) { |
|
totalQuality += sortedViews[n].first; |
|
totalColor += sortedViews[n].second; |
|
} |
|
const float avgQuality = totalQuality / nSize; |
|
const Color avgColor = totalColor / nSize; |
|
|
|
float totalLuminance = MeshTexture::GetLuminance(totalColor); |
|
float avgLuminance = totalLuminance / nSize; |
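		// order the luminance samples by closeness to the average luminance
		// (sortedLuminViews is currently referenced only by disabled fallback code below)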
|
std::sort(sortedLuminViews.begin(), sortedLuminViews.end(), |
|
[avgLuminance](const auto& a, const auto& b) { |
|
float luminDistA = cv::norm(avgLuminance - a.first); |
|
float luminDistB = cv::norm(avgLuminance - b.first); |
|
return luminDistA < luminDistB; }); |
|
|
|
// select the common cameras |
|
Mesh::FaceIdxArr virtualFace; |
|
FaceDataArr virtualFaceDatas; |
|
if (centerFaceDatas.empty()) { |
|
virtualFace.emplace_back(virtualFaceCenterFaceID); |
|
selectedFaces[virtualFaceCenterFaceID] = true; |
|
const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID); |
|
ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); |
|
remainingFaces.RemoveAtMove(posToErase); |
|
} else { |
|
IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); |
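			// NOTE: the commented block below is a disabled experiment that additionally filtered
			// selectedCams by the angle between each camera's viewing direction and the seed-face
			// normal, and by per-image edge masks (is_face_edge / is_face_delete_edge)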
|
|
|
			/*
			// get the normal of the center face (note: the variable is named normalCenter, not centerNormal)
			const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID];
			// filter selectedCams: keep only the views whose viewing angle is small enough
			// (the code below uses 45 degrees for edge faces)
			IIndexArr filteredCams; // stores the filtered view indices
			for (IIndex idxView : selectedCams) {
				const Image& imageData = images[idxView];
				// compute the camera viewing direction in world coordinates
				const RMatrix& R = imageData.camera.R; // adjust to the actual type of R if needed (Matrix3x3f or similar)
				// forward vector (0,0,-1) in the local camera frame
				Point3f localForward(0.0f, 0.0f, -1.0f);
				// multiply manually: cameraForward = R * localForward
				Point3f cameraForward;
				cameraForward.x = R(0,0) * localForward.x + R(0,1) * localForward.y + R(0,2) * localForward.z;
				cameraForward.y = R(1,0) * localForward.x + R(1,1) * localForward.y + R(1,2) * localForward.z;
				cameraForward.z = R(2,0) * localForward.x + R(2,1) * localForward.y + R(2,2) * localForward.z;

				// normalize cameraForward manually (Point3f may not provide a normalize() member)
				float norm = std::sqrt(cameraForward.x * cameraForward.x +
					cameraForward.y * cameraForward.y +
					cameraForward.z * cameraForward.z);
				if (norm > 0.0f) {
					cameraForward.x /= norm;
					cameraForward.y /= norm;
					cameraForward.z /= norm;
				} else {
					// handle the zero-vector case with a default direction
					cameraForward = Point3f(0, 0, -1);
				}

				// compute the cosine of the angle, using the already declared normalCenter
				// (assuming Normal converts to Point3f, otherwise convert explicitly)
				Point3f normalPoint(normalCenter.x, normalCenter.y, normalCenter.z); // explicit conversion
				float cosAngle = cameraForward.dot(normalPoint); // use normalPoint (converted from normalCenter)
				float angleDeg = std::acos(cosAngle) * 180.0f / M_PI; // convert radians to degrees

				std::string strPath = imageData.name;
				size_t lastSlash = strPath.find_last_of("/\\");
				if (lastSlash == std::string::npos) lastSlash = 0; // no separator: start from the beginning
				else lastSlash++; // skip the separator

				// find the position of the extension separator '.'
				size_t lastDot = strPath.find_last_of('.');
				if (lastDot == std::string::npos) lastDot = strPath.size(); // no extension: take everything to the end

				// extract the file name (without path and extension)
				std::string strName = strPath.substr(lastSlash, lastDot - lastSlash);

				// printf("CreateVirtualFace %s, %d\n", strName.c_str(), virtualFaceCenterFaceID);

				if (!scene.is_face_delete_edge(strName, virtualFaceCenterFaceID)) {
					if (scene.is_face_edge(strName, virtualFaceCenterFaceID)) {
						// printf("CreateVirtualFace %s, %d, %f\n", strName.c_str(), virtualFaceCenterFaceID, angleLimit);
						if (angleDeg <= 45.0f) {
							filteredCams.push_back(idxView);
						}
					} else {
						filteredCams.push_back(idxView);
					}
				}
			}

			// make sure selectedCams is non-const so it can be reassigned
			// (i.e. it must be declared as IIndexArr selectedCams = ...; not const IIndexArr)
			if (filteredCams.empty()) {
				// handle the case where all views got filtered out...
				// DEBUG_EXTRA("Warning: All views filtered for virtual face due to angle condition.");

				// selectedCams = SelectBestView(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality);
				selectedCams = filteredCams;
				isVirtualFace[virtualFaceCenterFaceID] = false;

			} else {
				selectedCams = filteredCams;
				isVirtualFace[virtualFaceCenterFaceID] = true;
			}
			//*/
|
|
|
currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); |
|
queuedFaces.clear(); |
|
do { |
|
const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); |
|
currentVirtualFaceQueue.PopHead(); |
|
// check for condition to add in current virtual face |
|
// normal angle smaller than thMaxNormalDeviation degrees |
|
const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; |
|
const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); |
|
// if (cosFaceToCenter < cosMaxNormalDeviation) |
|
// continue; |
|
				if (cosFaceToCenter < dynamicCosTh) // use the curvature-based dynamic threshold
|
continue; |
|
// check if current face is seen by all cameras in selectedCams |
|
ASSERT(!selectedCams.empty()); |
|
if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams)) |
|
continue; |
|
|
|
// Check color similarity |
|
const Color& centerColor = faceColors[virtualFaceCenterFaceID]; |
|
const Color& currentColor = faceColors[currentFaceId]; |
|
// if (cv::norm(centerColor) > 1e-5 && cv::norm(currentColor) > 1e-5) |
|
{ |
|
float colorDistance = cv::norm(centerColor - currentColor); |
|
// printf("1colorDistance=%f\n", colorDistance); |
|
if (colorDistance > thMaxColorDeviation) { |
|
// printf("2colorDistance=%f\n", colorDistance); |
|
// continue; // Skip if color difference is too large |
|
} |
|
} |
|
|
|
				/*
				// #ifdef TEXOPT_USE_OPENMP
				// #pragma omp critical
				// #endif
				// std::lock_guard<std::mutex> lock(*scene.mesh.invalidFaces.mtx);
				// if (scene.mesh.invalidFaces.data.find(currentFaceId) != scene.mesh.invalidFaces.data.end()) {
				//     continue; // skip the invalid face
				// }

				// check that the face is validly seen by all the selected cameras
				if (!IsFaceVisibleAndValid(facesDatas[currentFaceId], selectedCams)) {
					continue;
				}
				//*/
|
|
|
// remove it from remaining faces and add it to the virtual face |
|
{ |
|
const auto posToErase = remainingFaces.FindFirst(currentFaceId); |
|
ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); |
|
remainingFaces.RemoveAtMove(posToErase); |
|
selectedFaces[currentFaceId] = true; |
|
virtualFace.push_back(currentFaceId); |
|
} |
|
// add all new neighbors to the queue |
|
const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId]; |
|
for (int i = 0; i < 3; ++i) { |
|
const FIndex fIdx = ffaces[i]; |
|
if (fIdx == NO_ID) |
|
continue; |
|
if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) { |
|
currentVirtualFaceQueue.AddTail(fIdx); |
|
queuedFaces.emplace(fIdx); |
|
} |
|
} |
|
} while (!currentVirtualFaceQueue.IsEmpty()); |
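			// region growing done: virtualFace now holds the faces of this virtual patch;
			// next, aggregate one quality/color sample per selected camera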
|
|
|
/* |
|
if (selectedCams.empty()) { |
|
const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color; |
|
const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality; |
|
|
|
FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); |
|
virtualFaceData.color = medianColor; |
|
virtualFaceData.quality = medianQuality; |
|
|
|
} |
|
*/ |
|
|
|
// compute virtual face quality and create virtual face |
|
for (IIndex idxView: selectedCams) { |
|
FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); |
|
virtualFaceData.quality = 0; |
|
virtualFaceData.idxView = idxView; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color = Point3f::ZERO; |
|
#endif |
|
int invalidQuality = 0; |
|
Color invalidColor = Point3f::ZERO; |
|
unsigned processedFaces(0); |
|
bool bInvalidFacesRelative = false; |
|
int invalidCount = 0; |
|
for (FIndex fid : virtualFace) { |
|
const FaceDataArr& faceDatas = facesDatas[fid]; |
|
for (FaceData& faceData: faceDatas) { |
|
/* |
|
// if (faceData.idxView == idxView) { |
|
if (faceData.idxView == idxView && !faceData.bInvalidFacesRelative) { |
|
virtualFaceData.quality += faceData.quality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color += faceData.color; |
|
#endif |
|
++processedFaces; |
|
if (faceData.bInvalidFacesRelative) |
|
++invalidCount; |
|
break; |
|
} |
|
//*/ |
|
/* |
|
int nViewCount = 0; |
|
if (faceData.idxView == idxView) { |
|
for (const FaceData& fd : faceDatas) { |
|
if (fd.idxView != idxView) { |
|
++nViewCount; |
|
} |
|
} |
|
if ((nViewCount<=10) || !faceData.bInvalidFacesRelative) { |
|
virtualFaceData.quality += faceData.quality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color += faceData.color; |
|
#endif |
|
++processedFaces; |
|
// break; |
|
} |
|
} |
|
//*/ |
|
/* |
|
int nViewCount = 0; |
|
if (faceData.idxView == idxView) |
|
{ |
|
for (const FaceData& fd : faceDatas) |
|
{ |
|
if ( faceData.bInvalidFacesRelative) |
|
{ |
|
++nViewCount; |
|
} |
|
} |
|
// if (faceData.bInvalidFacesRelative) |
|
if (bHasInvalidView) |
|
{ |
|
// invalidQuality += faceData.quality; |
|
// #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
// invalidColor += faceData.color; |
|
// #endif |
|
|
|
++processedFaces; |
|
} |
|
else |
|
{ |
|
// virtualFaceData.quality += faceData.quality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
// virtualFaceData.color += faceData.color; |
|
#endif |
|
++processedFaces; |
|
// break; |
|
} |
|
} |
|
//*/ |
|
} |
|
} |
|
|
|
float maxLuminance = 120.0f; |
|
float minLuminance = 90.0f; |
|
int validViewsSize = validViews.size(); |
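				// the override on the next line forces the robust median/MAD-based aggregation path
				// for every selected view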
|
bHasInvalidView = true; |
|
if (bHasInvalidView) |
|
{ |
|
				// use robust statistics to estimate the central color and luminance of the views
				const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color;
				const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality;
				const float medianLuminance = ComputeMedianLuminance(sortedViews);

				// use the median absolute deviation (MAD) of color and luminance as the deviation measure
				const float colorMAD = ComputeColorMAD(sortedViews, medianColor);
				const float luminanceMAD = ComputeLuminanceMAD(sortedViews, medianLuminance);

				// derive deviation thresholds from the MAD
				// (about 3*MAD is the usual statistical outlier cut-off; a much tighter 0.01*MAD is used here)
				const float maxColorDeviation = 0.01f * colorMAD;
				const float maxLuminanceDeviation = 0.01f * luminanceMAD;
|
|
|
std::vector<int> validIndices; |
|
for (int n = 0; n < sortedViews.size(); ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
const float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
|
|
const float colorDistance = cv::norm(viewColor - medianColor); |
|
const float luminanceDistance = std::abs(viewLuminance - medianLuminance); |
|
|
|
if (colorDistance <= maxColorDeviation && |
|
luminanceDistance <= maxLuminanceDeviation) |
|
{ |
|
validIndices.push_back(n); |
|
} |
|
else |
|
{ |
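					// NOTE: the virtual-face queue was already drained by the region-growing loop above,
					// so GetHead() below no longer refers to a queued face; this branch appears to be
					// leftover experimental code and its angle/color checks act on stale data.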
|
const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); |
|
const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; |
|
const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); |
|
|
|
bool bColorSimilarity = true; |
|
// Check color similarity |
|
const Color& centerColor = faceColors[virtualFaceCenterFaceID]; |
|
const Color& currentColor = faceColors[currentFaceId]; |
|
|
|
float colorDistance = cv::norm(centerColor - currentColor); |
|
// printf("1colorDistance=%f\n", colorDistance); |
|
if (colorDistance > thMaxColorDeviation) { |
|
// printf("2colorDistance=%f\n", colorDistance); |
|
bColorSimilarity = false; |
|
} |
|
|
|
// if ((cosFaceToCenter<dynamicCosTh) || !IsFaceVisible(facesDatas[currentFaceId], selectedCams)) |
|
if (cosFaceToCenter<dynamicCosTh) |
|
{ |
|
if (nInvalidViewCount<=2) |
|
validIndices.push_back(n); |
|
else |
|
{ |
|
// if ((colorDistance <= 350.0f)) |
|
validIndices.push_back(n); |
|
} |
|
} |
|
else |
|
{ |
|
if (nInvalidViewCount<=2) |
|
validIndices.push_back(n); |
|
else |
|
{ |
|
// if (bColorSimilarity) |
|
validIndices.push_back(n); |
|
} |
|
} |
|
|
|
} |
|
} |
|
|
|
if (validIndices.empty()) { |
|
for (int n = 0; n < sortedViews.size(); ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
const float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
|
|
const float colorDistance = cv::norm(viewColor - medianColor); |
|
const float luminanceDistance = std::abs(viewLuminance - medianLuminance); |
|
|
|
if (colorDistance <= maxColorDeviation) |
|
{ |
|
// validIndices.push_back(n); |
|
} |
|
} |
|
} |
|
|
|
if (validIndices.empty()) { |
|
for (int n = 0; n < sortedViews.size(); ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
const float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
|
|
const float colorDistance = cv::norm(viewColor - medianColor); |
|
const float luminanceDistance = std::abs(viewLuminance - medianLuminance); |
|
|
|
if (luminanceDistance <= maxLuminanceDeviation) |
|
{ |
|
// validIndices.push_back(n); |
|
} |
|
} |
|
} |
|
|
|
/* |
|
if (validIndices.empty()) { |
|
for (int n = 0; n < sortedViews.size(); ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
const float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
|
|
const float colorDistance = cv::norm(viewColor - medianColor); |
|
const float luminanceDistance = std::abs(viewLuminance - medianLuminance); |
|
|
|
if (luminanceDistance <= maxLuminanceDeviation) |
|
{ |
|
validIndices.push_back(n); |
|
} |
|
} |
|
} |
|
|
|
if (validIndices.empty()) { |
|
for (int n = 0; n < sortedViews.size(); ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
const float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
|
|
const float colorDistance = cv::norm(viewColor - medianColor); |
|
const float luminanceDistance = std::abs(viewLuminance - medianLuminance); |
|
|
|
if (colorDistance <= maxColorDeviation) |
|
{ |
|
validIndices.push_back(n); |
|
} |
|
} |
|
} |
|
//*/ |
|
|
|
if (validViewsSize<=0&&false) |
|
{ |
|
//* |
|
// int nSize = sortedViews.size(); // (sortedViews.size() > 3) ? 3 : sortedViews.size(); |
|
|
|
				// // compute the initial averages
|
// float totalQuality = 0.0f; |
|
// Color totalColor(0,0,0); |
|
// for (int n = 0; n < nSize; ++n) { |
|
// totalQuality += sortedViews[n].first; |
|
// totalColor += sortedViews[n].second; |
|
// } |
|
// const float avgQuality = totalQuality / nSize; |
|
// const Color avgColor = totalColor / nSize; |
|
|
|
				// filter out views that deviate too much
				// std::vector<int> validIndices;
				float maxColorDeviation = 0.01f; // color deviation threshold
|
float maxLuminanceDeviation = 0.01f; |
|
|
|
for (int n = 0; n < nSize; ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
float colorDistance = cv::norm(avgColor - viewColor); |
|
// printf("colorDistance=%f\n", colorDistance); |
|
|
|
float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
float luminanceDistance = cv::norm(avgLuminance - viewLuminance); |
|
// printf("viewLuminance=%f\n", viewLuminance); |
|
|
|
// if ((colorDistance<=maxColorDeviation)&& |
|
// (viewLuminance<=maxLuminance)&& |
|
// (viewLuminance>=minLuminance)){ |
|
if ((colorDistance <= maxColorDeviation) && |
|
(luminanceDistance <= maxLuminanceDeviation)) { |
|
// validIndices.push_back(n); |
|
} |
|
} |
|
|
|
//* |
|
if (validIndices.empty()) { |
|
for (int n = 0; n < nSize; ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
float luminanceDistance = cv::norm(avgLuminance - viewLuminance); |
|
|
|
if (luminanceDistance <= maxLuminanceDeviation){ |
|
// validIndices.push_back(n); |
|
} |
|
} |
|
} |
|
if (validIndices.empty()) { |
|
for (int n = 0; n < nSize; ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
float colorDistance = cv::norm(avgColor - viewColor); |
|
|
|
if (colorDistance<=maxColorDeviation){ |
|
// validIndices.push_back(n); |
|
} |
|
} |
|
} |
|
//*/ |
|
|
|
/* |
|
float maxColorDeviation2 = 0.05f; |
|
if (validIndices.empty()) { |
|
for (int n = 0; n < nSize; ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
float colorDistance = cv::norm(avgColor - viewColor); |
|
|
|
if (colorDistance <= maxColorDeviation2) { |
|
validIndices.push_back(n); |
|
} |
|
} |
|
} |
|
//*/ |
|
|
|
/* |
|
float totalLuminance = MeshTexture::GetLuminance(totalColor); |
|
float avgLuminance = totalLuminance / nSize; |
|
for (int n = 0; n < nSize; ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
float luminanceDistance = cv::norm(avgLuminance - viewLuminance); |
|
|
|
if (luminanceDistance <= maxLuminanceDeviation) { |
|
validIndices.push_back(n); |
|
} |
|
} |
|
//*/ |
|
|
|
				// if all views were rejected, fall back to the robust median values
|
if (validIndices.empty()) { |
|
// virtualFaceData.quality = avgQuality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
// virtualFaceData.color = avgColor; |
|
#endif |
|
// virtualFaceData.quality = avgQuality; |
|
// virtualFaceData.color = sortedLuminViews[0].second; |
|
virtualFaceData.quality = medianQuality; |
|
virtualFaceData.color = medianColor; |
|
} |
|
else { |
|
					// recompute the average from the filtered views, weighting colors by closeness to the average color
|
float totalQuality2 = 0.0f; |
|
Color totalColor2 = Color(0,0,0); |
|
for (int idx : validIndices) { |
|
|
|
const Color& viewColor = sortedViews[idx].second; |
|
float colorDistance = cv::norm(avgColor - viewColor); |
|
float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation); |
|
|
|
totalQuality2 += sortedViews[idx].first; |
|
totalColor2 += sortedViews[idx].second * weight; |
|
} |
|
virtualFaceData.quality = totalQuality2 / validIndices.size(); |
|
virtualFaceData.color = totalColor2 / validIndices.size(); |
|
} |
|
//*/ |
|
} |
|
else if (validViewsSize>0&&validViewsSize<=2&&false) |
|
{ |
|
/* |
|
virtualFaceData.quality = 0; |
|
virtualFaceData.color = Point3f::ZERO; |
|
// int nSize = (validViews.size()>1) ? 1 : validViews.size(); |
|
int nSize = validViews.size(); |
|
for (int n=0; n<nSize; ++n) |
|
{ |
|
virtualFaceData.quality += validViews[n].first; |
|
virtualFaceData.color += validViews[n].second; |
|
} |
|
virtualFaceData.quality /= nSize; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color /= nSize; |
|
#endif |
|
*/ |
|
//* |
|
int nSize = validViews.size(); // (validViews.size() > 3) ? 3 : validViews.size(); |
|
|
|
				// compute the initial average quality and color over the valid views
				float totalQuality2 = 0.0f;
				Color totalColor2(0,0,0);
				for (int n = 0; n < nSize; ++n) {
					totalQuality2 += validViews[n].first;
					totalColor2 += validViews[n].second;
				}
				const float avgQuality2 = totalQuality2 / nSize;
				const Color avgColor2 = totalColor2 / nSize;

				// filter out views that deviate too much
				// std::vector<int> validIndices;
				float maxColorDeviation = 0.01f; // color deviation threshold
|
|
|
for (int n = 0; n < nSize; ++n) { |
|
const Color& viewColor = validViews[n].second; |
|
float colorDistance = cv::norm(avgColor2 - viewColor); |
|
// printf("colorDistance=%f\n", colorDistance); |
|
float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
if ((colorDistance<=maxColorDeviation)&& |
|
(viewLuminance<=120.0f)){ |
|
// if (colorDistance <= maxColorDeviation) { |
|
// validIndices.push_back(n); |
|
} |
|
} |
|
|
|
/* |
|
// float totalLuminance = MeshTexture::GetLuminance(totalColor); |
|
// float avgLuminance = totalLuminance / nSize; |
|
float maxLuminanceDeviation = 0.01f; |
|
for (int n = 0; n < nSize; ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
float luminanceDistance = cv::norm(avgLuminance - viewLuminance); |
|
// printf("luminanceDistance=%f\n", luminanceDistance); |
|
if (luminanceDistance <= maxLuminanceDeviation) { |
|
// validIndices.push_back(n); |
|
} |
|
} |
|
//*/ |
|
|
|
				// if all views were rejected, fall back to the median quality and color
|
if (validIndices.empty()) { |
|
// virtualFaceData.quality = avgQuality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
// virtualFaceData.color = avgColor; |
|
#endif |
|
virtualFaceData.quality = medianQuality; |
|
virtualFaceData.color = medianColor; |
|
|
|
// virtualFaceData.color = sortedLuminViews[0].second; |
|
/* |
|
for (int n = 0; n < nSize; ++n) { |
|
float lumin = sortedLuminViews[n].first; |
|
|
|
if (lumin>=minLuminance&&lumin<=maxLuminance) |
|
{ |
|
// virtualFaceData.quality = avgQuality; |
|
// virtualFaceData.color = sortedLuminViews[0].second; |
|
break; |
|
} |
|
} |
|
//*/ |
|
} |
|
else { |
|
// 使用过滤后的视图重新计算平均值 |
|
float totalQuality2 = 0.0f; |
|
Color totalColor2 = Color(0,0,0); |
|
for (int idx : validIndices) { |
|
const Color& viewColor = sortedViews[idx].second; |
|
float colorDistance = cv::norm(avgColor - viewColor); |
|
float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation); |
|
|
|
totalQuality2 += validViews[idx].first; |
|
totalColor2 += validViews[idx].second * weight; |
|
} |
|
virtualFaceData.quality = totalQuality2 / validIndices.size(); |
|
virtualFaceData.color = totalColor2 / validIndices.size(); |
|
} |
|
//*/ |
|
} |
|
else |
|
{ |
|
//* |
|
ASSERT(processedFaces > 0); |
|
// virtualFaceData.quality /= processedFaces; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
// virtualFaceData.color /= processedFaces; |
|
#endif |
|
|
|
virtualFaceData.quality = 0; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color = Point3f::ZERO; |
|
#endif |
|
//*/ |
|
/* |
|
				// if all views were rejected, fall back to the accumulated per-face average
|
if (validIndices.empty() || validViews.size() <= 0) { |
|
// virtualFaceData.quality = avgQuality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
// virtualFaceData.color = avgColor; |
|
#endif |
|
// virtualFaceData.quality = medianQuality; |
|
// virtualFaceData.color = medianColor; |
|
virtualFaceData.quality /= processedFaces; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color /= processedFaces; |
|
#endif |
|
} |
|
else { |
|
				// recompute the average from the filtered views, weighting colors by closeness to the average color
|
float totalQuality2 = 0.0f; |
|
Color totalColor2 = Color(0,0,0); |
|
for (int idx : validIndices) { |
|
const Color& viewColor = sortedViews[idx].second; |
|
float colorDistance = cv::norm(avgColor - viewColor); |
|
float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation); |
|
|
|
totalQuality2 += validViews[idx].first; |
|
totalColor2 += validViews[idx].second * weight; |
|
} |
|
virtualFaceData.quality = totalQuality2 / validIndices.size(); |
|
virtualFaceData.color = totalColor2 / validIndices.size(); |
|
} |
|
//*/ |
|
} |
|
} |
|
else |
|
{ |
|
				// use robust statistics to estimate the central color and luminance of the views
				const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color;
				const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality;
				const float medianLuminance = ComputeMedianLuminance(sortedViews);

				// use the median absolute deviation (MAD) of color and luminance as the deviation measure
				const float colorMAD = ComputeColorMAD(sortedViews, medianColor);
				const float luminanceMAD = ComputeLuminanceMAD(sortedViews, medianLuminance);

				// derive deviation thresholds from the MAD
				// (about 3*MAD is the usual statistical outlier cut-off; a much tighter 0.01*MAD is used here)
				const float maxColorDeviation = 0.01f * colorMAD;
				const float maxLuminanceDeviation = 0.01f * luminanceMAD;
|
|
|
std::vector<int> validIndices; |
|
for (int n = 0; n < sortedViews.size(); ++n) { |
|
const Color& viewColor = sortedViews[n].second; |
|
const float viewLuminance = MeshTexture::GetLuminance(viewColor); |
|
|
|
const float colorDistance = cv::norm(viewColor - medianColor); |
|
const float luminanceDistance = std::abs(viewLuminance - medianLuminance); |
|
|
|
// if (colorDistance <= maxColorDeviation && |
|
// luminanceDistance <= maxLuminanceDeviation) |
|
{ |
|
validIndices.push_back(n); |
|
} |
|
} |
|
|
|
if (validIndices.empty()) { |
|
|
|
virtualFaceData.quality = medianQuality; |
|
virtualFaceData.color = medianColor; |
|
} |
|
else { |
|
					// recompute the average quality and color from the filtered views
|
float totalQuality2 = 0.0f; |
|
Color totalColor2 = Color(0,0,0); |
|
for (int idx : validIndices) { |
|
totalQuality2 += validViews[idx].first; |
|
totalColor2 += validViews[idx].second; |
|
} |
|
virtualFaceData.quality = totalQuality2 / validIndices.size(); |
|
virtualFaceData.color = totalColor2 / validIndices.size(); |
|
} |
|
} |
|
|
|
// virtualFaceData.bInvalidFacesRelative = (invalidCount > 1); |
|
// virtualFaceData.bInvalidFacesRelative = (invalidCount > processedFaces * 2 / 3); |
|
} |
|
ASSERT(!virtualFaceDatas.empty()); |
|
} |
|
virtualFacesDatas.emplace_back(std::move(virtualFaceDatas)); |
|
virtualFaces.emplace_back(std::move(virtualFace)); |
|
} while (!remainingFaces.empty()); |
|
|
|
return true; |
|
} |
|
|
|
/* |
|
void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras, float thMaxNormalDeviation) const |
|
{ |
|
float thMaxColorDeviation = 0.000001f; |
|
if (meshCurvatures.empty()) { |
|
ComputeFaceCurvatures(); |
|
} |
|
|
|
const float ratioAngleToQuality(0.67f); |
|
const float cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation))); |
|
Mesh::FaceIdxArr remainingFaces(faces.size()); |
|
std::iota(remainingFaces.begin(), remainingFaces.end(), 0); |
|
std::vector<bool> selectedFaces(faces.size(), false); |
|
cQueue<FIndex, FIndex, 0> currentVirtualFaceQueue; |
|
std::unordered_set<FIndex> queuedFaces; |
|
|
|
// Precompute average color for each face |
|
	Colors faceColors; // start with an empty list
	faceColors.reserve(faces.size()); // pre-allocate (assuming cList provides reserve and performance matters)
	for (size_t i = 0; i < faces.size(); ++i) {
		faceColors.push_back(Color::ZERO); // add one zero-initialized entry per face
|
} |
|
for (FIndex idxFace = 0; idxFace < faces.size(); ++idxFace) { |
|
const FaceDataArr& faceDatas = facesDatas[idxFace]; |
|
if (faceDatas.empty()) continue; |
|
Color sumColor = Color::ZERO; |
|
for (const FaceData& fd : faceDatas) { |
|
sumColor += fd.color; |
|
} |
|
faceColors[idxFace] = sumColor / faceDatas.size(); |
|
} |
|
|
|
do { |
|
const FIndex startPos = RAND() % remainingFaces.size(); |
|
const FIndex virtualFaceCenterFaceID = remainingFaces[startPos]; |
|
|
|
		// dynamic normal-deviation threshold:
		// flat regions (curvature < 0.2) tolerate 15 degrees, curved regions only 8 degrees
		const float centerCurvature = meshCurvatures[virtualFaceCenterFaceID];
		const float dynamicThreshold = (centerCurvature < 0.2f) ? 15.0f : 8.0f;
|
const float dynamicCosTh = COS(FD2R(dynamicThreshold)); |
|
|
|
ASSERT(currentVirtualFaceQueue.IsEmpty()); |
|
const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID]; |
|
const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID]; |
|
|
|
		// check whether the center face has any view flagged as invalid
|
bool bHasInvalidView = false; |
|
int nInvalidViewCount = 0; |
|
int nTotalViewCount = 0; |
|
for (const FaceData& faceData : centerFaceDatas) { |
|
if (faceData.bInvalidFacesRelative) { |
|
bHasInvalidView = true; |
|
++nInvalidViewCount; |
|
} |
|
++nTotalViewCount; |
|
} |
|
|
|
std::vector<std::pair<float, Color>> sortedViews; |
|
std::vector<std::pair<float, Color>> sortedLuminViews; |
|
std::vector<std::pair<float, Color>> validViews; |
|
sortedViews.reserve(centerFaceDatas.size()); |
|
for (const FaceData& fd : centerFaceDatas) { |
|
if (fd.bInvalidFacesRelative) { |
|
sortedViews.emplace_back(fd.quality, fd.color); |
|
sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color); |
|
} else { |
|
sortedViews.emplace_back(fd.quality, fd.color); |
|
sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color); |
|
validViews.emplace_back(fd.quality, fd.color); |
|
} |
|
} |
|
std::sort(sortedViews.begin(), sortedViews.end(), |
|
[](const auto& a, const auto& b) { return a.first > b.first; }); |
|
std::sort(validViews.begin(), validViews.end(), |
|
[](const auto& a, const auto& b) { return a.first > b.first; }); |
|
|
|
int nSize = sortedViews.size(); |
|
		// compute the initial average quality and color over all views
|
float totalQuality = 0.0f; |
|
Color totalColor(0,0,0); |
|
for (int n = 0; n < nSize; ++n) { |
|
totalQuality += sortedViews[n].first; |
|
totalColor += sortedViews[n].second; |
|
} |
|
const float avgQuality = totalQuality / nSize; |
|
const Color avgColor = totalColor / nSize; |
|
|
|
float totalLuminance = MeshTexture::GetLuminance(totalColor); |
|
float avgLuminance = totalLuminance / nSize; |
|
std::sort(sortedLuminViews.begin(), sortedLuminViews.end(), |
|
[avgLuminance](const auto& a, const auto& b) { |
|
float luminDistA = cv::norm(avgLuminance - a.first); |
|
float luminDistB = cv::norm(avgLuminance - b.first); |
|
return luminDistA < luminDistB; }); |
|
|
|
// select the common cameras |
|
Mesh::FaceIdxArr virtualFace; |
|
FaceDataArr virtualFaceDatas; |
|
if (centerFaceDatas.empty()) { |
|
virtualFace.emplace_back(virtualFaceCenterFaceID); |
|
selectedFaces[virtualFaceCenterFaceID] = true; |
|
const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID); |
|
ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); |
|
remainingFaces.RemoveAtMove(posToErase); |
|
} else { |
|
const IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality); |
|
currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID); |
|
queuedFaces.clear(); |
|
do { |
|
const FIndex currentFaceId = currentVirtualFaceQueue.GetHead(); |
|
currentVirtualFaceQueue.PopHead(); |
|
// check for condition to add in current virtual face |
|
// normal angle smaller than thMaxNormalDeviation degrees |
|
const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId]; |
|
const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr())); |
|
				if (cosFaceToCenter < dynamicCosTh) // use the curvature-based dynamic threshold
|
continue; |
|
// check if current face is seen by all cameras in selectedCams |
|
ASSERT(!selectedCams.empty()); |
|
if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams)) |
|
continue; |
|
|
|
// Check color similarity |
|
const Color& centerColor = faceColors[virtualFaceCenterFaceID]; |
|
const Color& currentColor = faceColors[currentFaceId]; |
|
if (cv::norm(centerColor) > 1e-5 && cv::norm(currentColor) > 1e-5) { |
|
float colorDistance = cv::norm(centerColor - currentColor); |
|
if (colorDistance > thMaxColorDeviation) |
|
{ |
|
continue; // Skip if color difference is too large |
|
} |
|
} |
|
|
|
// remove it from remaining faces and add it to the virtual face |
|
{ |
|
const auto posToErase = remainingFaces.FindFirst(currentFaceId); |
|
ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX); |
|
remainingFaces.RemoveAtMove(posToErase); |
|
selectedFaces[currentFaceId] = true; |
|
virtualFace.push_back(currentFaceId); |
|
} |
|
// add all new neighbors to the queue |
|
const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId]; |
|
for (int i = 0; i < 3; ++i) { |
|
const FIndex fIdx = ffaces[i]; |
|
if (fIdx == NO_ID) |
|
continue; |
|
if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) { |
|
currentVirtualFaceQueue.AddTail(fIdx); |
|
queuedFaces.emplace(fIdx); |
|
} |
|
} |
|
} while (!currentVirtualFaceQueue.IsEmpty()); |
|
// compute virtual face quality and create virtual face |
|
for (IIndex idxView: selectedCams) { |
|
FaceData& virtualFaceData = virtualFaceDatas.emplace_back(); |
|
virtualFaceData.quality = 0; |
|
virtualFaceData.idxView = idxView; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color = Point3f::ZERO; |
|
#endif |
|
int invalidQuality = 0; |
|
Color invalidColor = Point3f::ZERO; |
|
unsigned processedFaces(0); |
|
bool bInvalidFacesRelative = false; |
|
int invalidCount = 0; |
|
for (FIndex fid : virtualFace) { |
|
const FaceDataArr& faceDatas = facesDatas[fid]; |
|
for (FaceData& faceData: faceDatas) { |
|
						// only process the data of the current view: accumulate quality and color
|
if (faceData.idxView == idxView) { |
|
virtualFaceData.quality += faceData.quality; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color += faceData.color; |
|
#endif |
|
processedFaces++; |
|
if (faceData.bInvalidFacesRelative) { |
|
invalidCount++; |
|
} |
|
							break; // each face has at most one entry per view; stop the inner loop once found
|
} |
|
} |
|
} |
|
				// post-process: average the accumulated values and set the invalid flag
|
if (processedFaces > 0) { |
|
virtualFaceData.quality /= processedFaces; |
|
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
virtualFaceData.color /= processedFaces; |
|
#endif |
|
					virtualFaceData.bInvalidFacesRelative = (invalidCount > processedFaces / 2); // mark the virtual face invalid if more than half of its faces are invalid
|
} else { |
|
					// if no data was found for this view, remove the virtualFaceData that was just added
|
virtualFaceDatas.pop_back(); |
|
} |
|
} |
|
ASSERT(!virtualFaceDatas.empty()); |
|
} |
|
virtualFacesDatas.emplace_back(std::move(virtualFaceDatas)); |
|
virtualFaces.emplace_back(std::move(virtualFace)); |
|
} while (!remainingFaces.empty()); |
|
} |
|
*/ |
|
#if TEXOPT_FACEOUTLIER == TEXOPT_FACEOUTLIER_MEDIAN |
|
|
|
// decrease the quality of / remove all views in which the face's projection |
|
// has a much different color than in the majority of views |
|
bool MeshTexture::FaceOutlierDetection(FaceDataArr& faceDatas, float thOutlier) const |
|
{ |
|
// consider as outlier if the absolute difference to the median is outside this threshold |
|
if (thOutlier <= 0) |
|
thOutlier = 0.15f*255.f; |
|
|
|
// init colors array |
|
if (faceDatas.size() <= 3) |
|
return false; |
|
FloatArr channels[3]; |
|
for (int c=0; c<3; ++c) |
|
channels[c].resize(faceDatas.size()); |
|
FOREACH(i, faceDatas) { |
|
const Color& color = faceDatas[i].color; |
|
for (int c=0; c<3; ++c) |
|
channels[c][i] = color[c]; |
|
} |
|
|
|
// find median |
|
for (int c=0; c<3; ++c) |
|
channels[c].Sort(); |
|
const unsigned idxMedian(faceDatas.size() >> 1); |
|
Color median; |
|
for (int c=0; c<3; ++c) |
|
median[c] = channels[c][idxMedian]; |
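	// note: the median is taken independently per color channel, so 'median' need not equal any actual sample color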
|
|
|
// abort if there are not at least 3 inliers |
|
int nInliers(0); |
|
BoolArr inliers(faceDatas.size()); |
|
FOREACH(i, faceDatas) { |
|
const Color& color = faceDatas[i].color; |
|
for (int c=0; c<3; ++c) { |
|
if (ABS(median[c]-color[c]) > thOutlier) { |
|
inliers[i] = false; |
|
goto CONTINUE_LOOP; |
|
} |
|
} |
|
inliers[i] = true; |
|
++nInliers; |
|
CONTINUE_LOOP:; |
|
} |
|
if (nInliers == faceDatas.size()) |
|
return true; |
|
if (nInliers < 3) |
|
return false; |
|
|
|
// remove outliers |
|
RFOREACH(i, faceDatas) |
|
if (!inliers[i]) |
|
faceDatas.RemoveAt(i); |
|
return true; |
|
} |
|
|
|
#elif TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA |
|
|
|
// A multi-variate normal distribution which is NOT normalized such that the integral is 1 |
|
// - centered is the vector for which the function is to be evaluated with the mean subtracted [Nx1] |
|
// - X is the vector for which the function is to be evaluated [Nx1] |
|
// - mu is the mean around which the distribution is centered [Nx1] |
|
// - covarianceInv is the inverse of the covariance matrix [NxN] |
|
// return exp(-1/2 * (X-mu)^T * covariance_inv * (X-mu)) |
|
template <typename T, int N> |
|
inline T MultiGaussUnnormalized(const Eigen::Matrix<T,N,1>& centered, const Eigen::Matrix<T,N,N>& covarianceInv) { |
|
return EXP(T(-0.5) * T(centered.adjoint() * covarianceInv * centered)); |
|
} |
|
template <typename T, int N> |
|
inline T MultiGaussUnnormalized(const Eigen::Matrix<T,N,1>& X, const Eigen::Matrix<T,N,1>& mu, const Eigen::Matrix<T,N,N>& covarianceInv) { |
|
return MultiGaussUnnormalized<T,N>(X - mu, covarianceInv); |
|
} |
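// Usage sketch (illustrative only): for a color sample X, mean mu and inverse covariance S,
//   double w = MultiGaussUnnormalized<double,3>(X, mu, S); // w = exp(-0.5 * (X-mu)^T * S * (X-mu))
// so keeping samples with w > thOutlier is equivalent to a Mahalanobis-distance cut-off of
// sqrt(-2*ln(thOutlier)) (about 2.37 for the default thOutlier = 6e-2).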
|
|
|
// decrease the quality of / remove all views in which the face's projection |
|
// has a much different color than in the majority of views |
|
bool MeshTexture::FaceOutlierDetection(FaceDataArr& faceDatas, float thOutlier) const |
|
{ |
|
// reject all views whose gauss value is below this threshold |
|
if (thOutlier <= 0) |
|
thOutlier = 6e-2f; |
|
|
|
const float minCovariance(1e-3f); // if all covariances drop below this the outlier detection aborted |
|
|
|
const unsigned maxIterations(10); |
|
const unsigned minInliers(4); |
|
|
|
// init colors array |
|
if (faceDatas.size() <= minInliers) |
|
return false; |
|
Eigen::Matrix3Xd colorsAll(3, faceDatas.size()); |
|
BoolArr inliers(faceDatas.size()); |
|
FOREACH(i, faceDatas) { |
|
colorsAll.col(i) = ((const Color::EVec)faceDatas[i].color).cast<double>(); |
|
inliers[i] = true; |
|
} |
|
|
|
// perform outlier removal; abort if something goes wrong |
|
// (number of inliers below threshold or can not invert the covariance) |
|
size_t numInliers(faceDatas.size()); |
|
Eigen::Vector3d mean; |
|
Eigen::Matrix3d covariance; |
|
Eigen::Matrix3d covarianceInv; |
|
for (unsigned iter = 0; iter < maxIterations; ++iter) { |
|
// compute the mean color and color covariance only for inliers |
|
const Eigen::Block<Eigen::Matrix3Xd,3,Eigen::Dynamic,!Eigen::Matrix3Xd::IsRowMajor> colors(colorsAll.leftCols(numInliers)); |
|
mean = colors.rowwise().mean(); |
|
const Eigen::Matrix3Xd centered(colors.colwise() - mean); |
|
covariance = (centered * centered.transpose()) / double(colors.cols() - 1); |
|
|
|
// stop if all covariances gets very small |
|
if (covariance.array().abs().maxCoeff() < minCovariance) { |
|
// remove the outliers |
|
RFOREACH(i, faceDatas) |
|
if (!inliers[i]) |
|
faceDatas.RemoveAt(i); |
|
return true; |
|
} |
|
|
|
// invert the covariance matrix |
|
// (FullPivLU not the fastest, but gives feedback about numerical stability during inversion) |
|
const Eigen::FullPivLU<Eigen::Matrix3d> lu(covariance); |
|
if (!lu.isInvertible()) |
|
return false; |
|
covarianceInv = lu.inverse(); |
|
|
|
// filter inliers |
|
// (all views with a gauss value above the threshold) |
|
numInliers = 0; |
|
bool bChanged(false); |
|
FOREACH(i, faceDatas) { |
|
const Eigen::Vector3d color(((const Color::EVec)faceDatas[i].color).cast<double>()); |
|
const double gaussValue(MultiGaussUnnormalized<double,3>(color, mean, covarianceInv)); |
|
bool& inlier = inliers[i]; |
|
if (gaussValue > thOutlier) { |
|
// set as inlier |
|
colorsAll.col(numInliers++) = color; |
|
if (inlier != true) { |
|
inlier = true; |
|
bChanged = true; |
|
} |
|
} else { |
|
// set as outlier |
|
if (inlier != false) { |
|
inlier = false; |
|
bChanged = true; |
|
} |
|
} |
|
} |
|
if (numInliers == faceDatas.size()) |
|
return true; |
|
if (numInliers < minInliers) |
|
return false; |
|
if (!bChanged) |
|
break; |
|
} |
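// at this point mean/covarianceInv describe the estimated inlier color distribution; |
// depending on the configured method the views are either down-weighted by their |
// gauss value (GAUSS_DAMPING) or the flagged outliers are simply removed (GAUSS_CLAMPING) |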
|
|
|
#if TEXOPT_FACEOUTLIER == TEXOPT_FACEOUTLIER_GAUSS_DAMPING |
|
// select the final inliers |
|
const float factorOutlierRemoval(0.2f); |
|
covarianceInv *= factorOutlierRemoval; |
|
RFOREACH(i, faceDatas) { |
|
const Eigen::Vector3d color(((const Color::EVec)faceDatas[i].color).cast<double>()); |
|
const double gaussValue(MultiGaussUnnormalized<double,3>(color, mean, covarianceInv)); |
|
ASSERT(gaussValue >= 0 && gaussValue <= 1); |
|
faceDatas[i].quality *= gaussValue; |
|
} |
|
#endif |
|
#if TEXOPT_FACEOUTLIER == TEXOPT_FACEOUTLIER_GAUSS_CLAMPING |
|
// remove outliers |
|
RFOREACH(i, faceDatas) |
|
if (!inliers[i]) |
|
faceDatas.RemoveAt(i); |
|
#endif |
|
return true; |
|
} |
|
#endif |
|
|
|
bool MeshTexture::FaceViewSelection( unsigned minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, int nIgnoreMaskLabel, const IIndexArr& views) |
|
{ |
|
// extract array of triangles incident to each vertex |
|
ListVertexFaces(); |
|
|
|
// create texture patches |
|
{ |
|
// compute face normals and smoothen them |
|
scene.mesh.SmoothNormalFaces(); |
|
|
|
const bool bUseVirtualFaces(minCommonCameras > 0); |
|
|
|
// list all views for each face |
|
FaceDataViewArr facesDatas; |
|
if (!ListCameraFaces(facesDatas, fOutlierThreshold, nIgnoreMaskLabel, views, bUseVirtualFaces)) |
|
return false; |
|
|
|
// create faces graph |
|
typedef boost::adjacency_list<boost::vecS, boost::vecS, boost::undirectedS> Graph; |
|
typedef boost::graph_traits<Graph>::edge_iterator EdgeIter; |
|
typedef boost::graph_traits<Graph>::out_edge_iterator EdgeOutIter; |
|
Graph graph; |
|
LabelArr labels; |
|
|
|
// construct and use virtual faces for patch creation instead of actual mesh faces; |
|
// the virtual faces are composed of coplanar triangles sharing the same views |
|
if (bUseVirtualFaces) { |
|
// 1) create FaceToVirtualFaceMap |
|
FaceDataViewArr virtualFacesDatas; |
|
VirtualFaceIdxsArr virtualFaces; // stores each virtual face as an array of mesh face ID |
|
CreateVirtualFaces(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); |
|
Mesh::FaceIdxArr mapFaceToVirtualFace(faces.size()); // for each mesh face ID, store the virtual face ID which contains it |
|
size_t controlCounter(0); |
|
FOREACH(idxVF, virtualFaces) { |
|
const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; |
|
for (FIndex idxFace : vf) { |
|
mapFaceToVirtualFace[idxFace] = idxVF; |
|
++controlCounter; |
|
} |
|
} |
|
ASSERT(controlCounter == faces.size()); |
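// sanity check: the virtual faces are expected to cover every mesh face exactly |
// once (controlCounter totals the face memberships) |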
|
// 2) find the neighbors of each virtual face |
|
VirtualFaceIdxsArr virtualFaceNeighbors; { // for each virtual face, the list of virtual faces with at least one vertex in common |
|
virtualFaceNeighbors.resize(virtualFaces.size()); |
|
FOREACH(idxVF, virtualFaces) { |
|
const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; |
|
Mesh::FaceIdxArr& vfNeighbors = virtualFaceNeighbors[idxVF]; |
|
for (FIndex idxFace : vf) { |
|
const Mesh::FaceFaces& adjFaces = faceFaces[idxFace]; |
|
for (int i = 0; i < 3; ++i) { |
|
const FIndex fAdj(adjFaces[i]); |
|
if (fAdj == NO_ID) |
|
continue; |
|
if (mapFaceToVirtualFace[fAdj] == idxVF) |
|
continue; |
|
if (fAdj != idxFace && vfNeighbors.Find(mapFaceToVirtualFace[fAdj]) == Mesh::FaceIdxArr::NO_INDEX) { |
|
vfNeighbors.emplace_back(mapFaceToVirtualFace[fAdj]); |
|
} |
|
} |
|
} |
|
} |
|
} |
|
// 3) use virtual faces to build the graph |
|
// 4) assign images to virtual faces |
|
// 5) spread image ID to each mesh face from virtual face |
|
FOREACH(idxFace, virtualFaces) { |
|
MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); |
|
ASSERT(idx == idxFace); |
|
} |
|
FOREACH(idxVirtualFace, virtualFaces) { |
|
const Mesh::FaceIdxArr& afaces = virtualFaceNeighbors[idxVirtualFace]; |
|
for (FIndex idxVirtualFaceAdj: afaces) { |
|
if (idxVirtualFace >= idxVirtualFaceAdj) |
|
continue; |
|
const bool bInvisibleFace(virtualFacesDatas[idxVirtualFace].empty()); |
|
const bool bInvisibleFaceAdj(virtualFacesDatas[idxVirtualFaceAdj].empty()); |
|
if (bInvisibleFace || bInvisibleFaceAdj) |
|
continue; |
|
boost::add_edge(idxVirtualFace, idxVirtualFaceAdj, graph); |
|
} |
|
} |
|
ASSERT((Mesh::FIndex)boost::num_vertices(graph) == virtualFaces.size()); |
|
// assign the best view to each face |
|
labels.resize(faces.size()); { |
|
// normalize quality values |
|
float maxQuality(0); |
|
for (const FaceDataArr& faceDatas: virtualFacesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
if (maxQuality < faceData.quality) |
|
maxQuality = faceData.quality; |
|
} |
|
Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); |
|
for (const FaceDataArr& faceDatas: virtualFacesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
hist.Add(faceData.quality); |
|
} |
|
const float normQuality(hist.GetApproximatePermille(0.95f)); |
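// normQuality is the (approximate) 95% quantile of all per-view quality values; |
// normalizing by it keeps a few extreme quality values from compressing the |
// data-cost range of the remaining views |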
|
|
|
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP |
|
// initialize inference structures |
|
const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); |
|
LBPInference inference; { |
|
inference.SetNumNodes(virtualFaces.size()); |
|
inference.SetSmoothCost(SmoothnessPotts); |
|
EdgeOutIter ei, eie; |
|
FOREACH(f, virtualFaces) { |
|
for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { |
|
ASSERT(f == (FIndex)ei->m_source); |
|
const FIndex fAdj((FIndex)ei->m_target); |
|
if (f < fAdj) // add edges only once |
|
inference.SetNeighbors(f, fAdj); |
|
} |
|
// set costs for label 0 (undefined) |
|
inference.SetDataCost((Label)0, f, MaxEnergy); |
|
} |
|
} |
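// the view selection is solved as a discrete labeling (MRF) problem: |
//   E(l) = sum_f D_f(l_f) + sum_{(f,fAdj)} V(l_f, l_fAdj) |
// with one node per (virtual) face, labels = view index + 1 (label 0 means |
// "keep untextured"), data terms D_f derived from the normalized projection |
// quality below, and V the Potts penalty set via SmoothnessPotts; LBP then |
// approximately minimizes E |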
|
|
|
//* |
|
// set data costs for all labels (except label 0 - undefined) |
|
FOREACH(f, virtualFacesDatas) { |
|
const FaceDataArr& faceDatas = virtualFacesDatas[f]; |
|
for (const FaceData& faceData: faceDatas) { |
|
const Label label((Label)faceData.idxView+1); |
|
const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); |
|
const float dataCost((1.f-normalizedQuality)*MaxEnergy); |
|
inference.SetDataCost(label, f, dataCost); |
|
} |
|
} |
|
//*/ |
|
/* |
|
FOREACH(f, virtualFacesDatas) { |
|
const FaceDataArr& faceDatas = virtualFacesDatas[f]; |
|
const size_t numViews = faceDatas.size(); |
|
const unsigned minSingleView = 2; // force single-view selection when the number of available views is <= 2 |
|
|
|
// when too few views are available, keep only the best view |
|
// if (numViews <= minSingleView) { |
|
if (true) { |
|
// find the highest-quality view |
|
float maxQuality = 0; |
|
IIndex bestView = NO_ID; |
|
for (const FaceData& fd : faceDatas) { |
|
if (fd.quality > maxQuality) { |
|
maxQuality = fd.quality; |
|
bestView = fd.idxView; |
|
} |
|
} |
|
// set the data term only for the best view; all other views get MaxEnergy |
|
for (const FaceData& fd : faceDatas) { |
|
const Label label = (Label)fd.idxView + 1; |
|
// const float cost = (fd.idxView == bestView) ? |
|
// (1.f - fd.quality/normQuality) * MaxEnergy : |
|
// MaxEnergy; |
|
const float cost = (fd.idxView == bestView) ? |
|
(1.f - fd.quality/normQuality) * MaxEnergy : |
|
MaxEnergy; |
|
inference.SetDataCost(label, f, cost); |
|
} |
|
} |
|
else { |
|
for (const FaceData& faceData: faceDatas) { |
|
const Label label((Label)faceData.idxView+1); |
|
const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); |
|
// const float normalizedQuality = faceData.quality/normQuality; |
|
const float dataCost((1.f-normalizedQuality)*MaxEnergy); |
|
inference.SetDataCost(label, f, dataCost); |
|
} |
|
} |
|
} |
|
//*/ |
|
|
|
// assign the optimal view (label) to each face |
|
// (label 0 is reserved as undefined) |
|
inference.Optimize(); |
|
|
|
// extract resulting labeling |
|
LabelArr virtualLabels(virtualFaces.size()); |
|
virtualLabels.Memset(0xFF); |
|
FOREACH(l, virtualLabels) { |
|
const Label label(inference.GetLabel(l)); |
|
ASSERT(label < images.size()+1); |
|
if (label > 0) |
|
virtualLabels[l] = label-1; |
|
} |
|
FOREACH(l, labels) { |
|
labels[l] = virtualLabels[mapFaceToVirtualFace[l]]; |
|
} |
|
#endif |
|
} |
|
|
|
graph.clear(); |
|
|
|
/* |
|
// mark virtual-face boundaries as seam edges |
|
FOREACH(idxVF, virtualFaces) { |
|
const auto& vf = virtualFaces[idxVF]; |
|
for (FIndex fid : vf) { |
|
const auto& adjFaces = faceFaces[fid]; |
|
for (int i=0; i<3; ++i) { |
|
if (adjFaces[i] == NO_ID) continue; |
|
const FIndex adjVF = mapFaceToVirtualFace[adjFaces[i]]; |
|
if (adjVF != idxVF) { |
|
seamEdges.emplace_back(fid, adjFaces[i]); |
|
} |
|
} |
|
} |
|
} |
|
//*/ |
|
} |
|
|
|
/* |
|
#if TEXOPT_USE_ANISOTROPIC |
|
const int anisoLevel = 8; // anisotropic filtering level |
|
for (auto& tex : textures) { |
|
tex.SetFilterMode(Texture::ANISOTROPIC); |
|
tex.SetAnisotropy(anisoLevel); |
|
} |
|
#endif |
|
//*/ |
|
|
|
// create the graph of faces: each vertex is a face and the edges are the edges shared by the faces |
|
FOREACH(idxFace, faces) { |
|
MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); |
|
ASSERT(idx == idxFace); |
|
} |
|
FOREACH(idxFace, faces) { |
|
const Mesh::FaceFaces& afaces = faceFaces[idxFace]; |
|
for (int v=0; v<3; ++v) { |
|
const FIndex idxFaceAdj = afaces[v]; |
|
if (idxFaceAdj == NO_ID || idxFace >= idxFaceAdj) |
|
continue; |
|
const bool bInvisibleFace(facesDatas[idxFace].empty()); |
|
const bool bInvisibleFaceAdj(facesDatas[idxFaceAdj].empty()); |
|
if (bInvisibleFace || bInvisibleFaceAdj) { |
|
if (bInvisibleFace != bInvisibleFaceAdj) |
|
seamEdges.emplace_back(idxFace, idxFaceAdj); |
|
continue; |
|
} |
|
boost::add_edge(idxFace, idxFaceAdj, graph); |
|
} |
|
} |
|
faceFaces.Release(); |
|
ASSERT((Mesh::FIndex)boost::num_vertices(graph) == faces.size()); |
|
|
|
// LOG_OUT() << "bUseVirtualFaces=" << bUseVirtualFaces << std::endl; |
|
// start patch creation directly from individual faces |
|
if (!bUseVirtualFaces) { |
|
// assign the best view to each face |
|
labels.resize(faces.size()); { |
|
// normalize quality values |
|
float maxQuality(0); |
|
for (const FaceDataArr& faceDatas: facesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
if (maxQuality < faceData.quality) |
|
maxQuality = faceData.quality; |
|
} |
|
Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); |
|
for (const FaceDataArr& faceDatas: facesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
hist.Add(faceData.quality); |
|
} |
|
const float normQuality(hist.GetApproximatePermille(0.95f)); |
|
|
|
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP |
|
// initialize inference structures |
|
const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); |
|
LBPInference inference; { |
|
inference.SetNumNodes(faces.size()); |
|
inference.SetSmoothCost(SmoothnessPotts); |
|
EdgeOutIter ei, eie; |
|
FOREACH(f, faces) { |
|
for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { |
|
ASSERT(f == (FIndex)ei->m_source); |
|
const FIndex fAdj((FIndex)ei->m_target); |
|
if (f < fAdj) // add edges only once |
|
inference.SetNeighbors(f, fAdj); |
|
} |
|
// set costs for label 0 (undefined) |
|
inference.SetDataCost((Label)0, f, MaxEnergy); |
|
} |
|
} |
|
|
|
//* |
|
// set data costs for all labels (except label 0 - undefined) |
|
FOREACH(f, facesDatas) { |
|
const FaceDataArr& faceDatas = facesDatas[f]; |
|
const size_t numViews = faceDatas.size(); |
|
unsigned minViews=3; |
|
float dataWeightFactor=2.0f; |
|
// LOG_OUT() << "FaceViewSelection numViews=" << numViews << std::endl; |
|
const float factor = (numViews < minViews) ? dataWeightFactor : 1.0f; |
|
for (const FaceData& faceData: faceDatas) { |
|
const Label label((Label)faceData.idxView+1); |
|
const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); |
|
// const float dataCost((1.f-normalizedQuality)*MaxEnergy * factor); |
|
const float dataCost((1.f-normalizedQuality)*MaxEnergy); |
|
inference.SetDataCost(label, f, dataCost); |
|
} |
|
} |
|
//*/ |
|
/* |
|
FOREACH(f, facesDatas) { |
|
const FaceDataArr& faceDatas = facesDatas[f]; |
|
const size_t numViews = faceDatas.size(); |
|
const unsigned minSingleView = 2; // force single-view selection when the number of available views is <= minSingleView |
|
|
|
// when too few views are available, keep only the best view |
|
// if (numViews <= minSingleView) { |
|
if (true) { |
|
// find the highest-quality view |
|
float maxQuality = 0; |
|
IIndex bestView = NO_ID; |
|
for (const FaceData& fd : faceDatas) { |
|
if (fd.quality > maxQuality) { |
|
maxQuality = fd.quality; |
|
bestView = fd.idxView; |
|
} |
|
} |
|
// set the quality-based data term for the best view; the other views get the fixed cost below |
|
for (const FaceData& fd : faceDatas) { |
|
const Label label = (Label)fd.idxView + 1; |
|
// const float cost = (fd.idxView == bestView) ? |
|
// (1.f - fd.quality/normQuality) * MaxEnergy : |
|
// MaxEnergy; |
|
const float cost = (fd.idxView == bestView) ? |
|
(1.f - fd.quality/normQuality) * MaxEnergy : |
|
0; |
|
inference.SetDataCost(label, f, cost); |
|
} |
|
} |
|
else { |
|
// handle the multi-view case normally |
|
for (const FaceData& faceData : faceDatas) { |
|
const Label label = (Label)faceData.idxView + 1; |
|
const float normalizedQuality = faceData.quality/normQuality; |
|
const float dataCost = (1.f - normalizedQuality) * MaxEnergy; |
|
inference.SetDataCost(label, f, dataCost); |
|
} |
|
} |
|
} |
|
//*/ |
|
|
|
// assign the optimal view (label) to each face |
|
// (label 0 is reserved as undefined) |
|
inference.Optimize(); |
|
|
|
// extract resulting labeling |
|
labels.Memset(0xFF); |
|
FOREACH(l, labels) { |
|
const Label label(inference.GetLabel(l)); |
|
ASSERT(label < images.size()+1); |
|
if (label > 0) |
|
labels[l] = label-1; |
|
} |
|
#endif |
|
|
|
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_TRWS |
|
// find connected components |
|
ASSERT((FIndex)boost::num_vertices(graph) == faces.size()); |
|
components.resize(faces.size()); |
|
const FIndex nComponents(boost::connected_components(graph, components.data())); |
|
|
|
// map face ID from global to component space |
|
typedef cList<NodeID, NodeID, 0, 128, NodeID> NodeIDs; |
|
NodeIDs nodeIDs(faces.size()); |
|
NodeIDs sizes(nComponents); |
|
sizes.Memset(0); |
|
FOREACH(c, components) |
|
nodeIDs[c] = sizes[components[c]]++; |
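// nodeIDs[f] is the index of face f within its own connected component, so each |
// component can be optimized as an independent TRW-S problem below |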
|
|
|
// initialize inference structures |
|
const LabelID numLabels(images.size()+1); |
|
CLISTDEFIDX(TRWSInference, FIndex) inferences(nComponents); |
|
FOREACH(s, sizes) { |
|
const NodeID numNodes(sizes[s]); |
|
ASSERT(numNodes > 0); |
|
if (numNodes <= 1) |
|
continue; |
|
TRWSInference& inference = inferences[s]; |
|
inference.Init(numNodes, numLabels); |
|
} |
|
|
|
// set data costs |
|
{ |
|
// add nodes |
|
CLISTDEF0(EnergyType) D(numLabels); |
|
FOREACH(f, facesDatas) { |
|
TRWSInference& inference = inferences[components[f]]; |
|
if (inference.IsEmpty()) |
|
continue; |
|
D.MemsetValue(MaxEnergy); |
|
const FaceDataArr& faceDatas = facesDatas[f]; |
|
for (const FaceData& faceData: faceDatas) { |
|
const Label label((Label)faceData.idxView); |
|
const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); |
|
const EnergyType dataCost(MaxEnergy*(1.f-normalizedQuality)); |
|
D[label] = dataCost; |
|
} |
|
const NodeID nodeID(nodeIDs[f]); |
|
inference.AddNode(nodeID, D.Begin()); |
|
} |
|
// add edges |
|
EdgeOutIter ei, eie; |
|
FOREACH(f, faces) { |
|
TRWSInference& inference = inferences[components[f]]; |
|
if (inference.IsEmpty()) |
|
continue; |
|
for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { |
|
ASSERT(f == (FIndex)ei->m_source); |
|
const FIndex fAdj((FIndex)ei->m_target); |
|
ASSERT(components[f] == components[fAdj]); |
|
if (f < fAdj) // add edges only once |
|
inference.AddEdge(nodeIDs[f], nodeIDs[fAdj]); |
|
} |
|
} |
|
} |
|
|
|
// assign the optimal view (label) to each face |
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp parallel for schedule(dynamic) |
|
for (int i=0; i<(int)inferences.size(); ++i) { |
|
#else |
|
FOREACH(i, inferences) { |
|
#endif |
|
TRWSInference& inference = inferences[i]; |
|
if (inference.IsEmpty()) |
|
continue; |
|
inference.Optimize(); |
|
} |
|
// extract resulting labeling |
|
labels.Memset(0xFF); |
|
FOREACH(l, labels) { |
|
TRWSInference& inference = inferences[components[l]]; |
|
if (inference.IsEmpty()) |
|
continue; |
|
const Label label(inference.GetLabel(nodeIDs[l])); |
|
ASSERT(label >= 0 && label < numLabels); |
|
if (label < images.size()) |
|
labels[l] = label; |
|
} |
|
#endif |
|
} |
|
} |
|
|
|
// create texture patches |
|
{ |
|
// divide graph in sub-graphs of connected faces having the same label |
|
EdgeIter ei, eie; |
|
const PairIdxArr::IDX startLabelSeamEdges(seamEdges.size()); |
|
for (boost::tie(ei, eie) = boost::edges(graph); ei != eie; ++ei) { |
|
const FIndex fSource((FIndex)ei->m_source); |
|
const FIndex fTarget((FIndex)ei->m_target); |
|
ASSERT(components.empty() || components[fSource] == components[fTarget]); |
|
if (labels[fSource] != labels[fTarget]) |
|
seamEdges.emplace_back(fSource, fTarget); |
|
} |
|
for (const PairIdx *pEdge=seamEdges.Begin()+startLabelSeamEdges, *pEdgeEnd=seamEdges.End(); pEdge!=pEdgeEnd; ++pEdge) |
|
boost::remove_edge(pEdge->i, pEdge->j, graph); |
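// with the label-boundary edges removed, every remaining connected component of |
// the graph is a maximal set of adjacent faces textured from the same view, |
// i.e. exactly one texture patch |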
|
|
|
// find connected components: texture patches |
|
ASSERT((FIndex)boost::num_vertices(graph) == faces.size()); |
|
components.resize(faces.size()); |
|
const FIndex nComponents(boost::connected_components(graph, components.data())); |
|
|
|
// create texture patches; |
|
// last texture patch contains all faces with no texture |
|
LabelArr sizes(nComponents); |
|
sizes.Memset(0); |
|
FOREACH(c, components) |
|
++sizes[components[c]]; |
|
texturePatches.resize(nComponents+1); |
|
texturePatches.back().label = NO_ID; |
|
FOREACH(f, faces) { |
|
const Label label(labels[f]); |
|
const FIndex c(components[f]); |
|
TexturePatch& texturePatch = texturePatches[c]; |
|
ASSERT(texturePatch.label == label || texturePatch.faces.empty()); |
|
if (label == NO_ID) { |
|
texturePatch.label = NO_ID; |
|
texturePatches.back().faces.Insert(f); |
|
} else { |
|
if (texturePatch.faces.empty()) { |
|
texturePatch.label = label; |
|
texturePatch.faces.reserve(sizes[c]); |
|
} |
|
texturePatch.faces.Insert(f); |
|
} |
|
} |
|
// remove all patches with invalid label (except the last one) |
|
// and create the map from the old index to the new one |
|
mapIdxPatch.resize(nComponents); |
|
std::iota(mapIdxPatch.Begin(), mapIdxPatch.End(), 0); |
|
for (FIndex t = nComponents; t-- > 0; ) { |
|
if (texturePatches[t].label == NO_ID) { |
|
texturePatches.RemoveAtMove(t); |
|
mapIdxPatch.RemoveAtMove(t); |
|
} |
|
} |
|
const unsigned numPatches(texturePatches.size()-1); |
|
uint32_t idxPatch(0); |
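// re-index the surviving patches with consecutive IDs; components whose patch |
// was removed above are mapped to the last patch (the untextured one) |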
|
for (IndexArr::IDX i=0; i<mapIdxPatch.size(); ++i) { |
|
while (i < mapIdxPatch[i]) |
|
mapIdxPatch.InsertAt(i++, numPatches); |
|
mapIdxPatch[i] = idxPatch++; |
|
} |
|
while (mapIdxPatch.size() <= nComponents) |
|
mapIdxPatch.Insert(numPatches); |
|
} |
|
} |
|
return true; |
|
} |
|
|
|
bool MeshTexture::FaceViewSelection2( unsigned minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, int nIgnoreMaskLabel, const IIndexArr& views) |
|
{ |
|
// extract array of triangles incident to each vertex |
|
ListVertexFaces(); |
|
|
|
// create texture patches |
|
{ |
|
// compute face normals and smoothen them |
|
scene.mesh.SmoothNormalFaces(); |
|
|
|
const bool bUseVirtualFaces(minCommonCameras > 0); |
|
|
|
// list all views for each face |
|
FaceDataViewArr facesDatas; |
|
if (!ListCameraFaces(facesDatas, fOutlierThreshold, nIgnoreMaskLabel, views, bUseVirtualFaces)) |
|
return false; |
|
|
|
// create faces graph |
|
typedef boost::adjacency_list<boost::vecS, boost::vecS, boost::undirectedS> Graph; |
|
typedef boost::graph_traits<Graph>::edge_iterator EdgeIter; |
|
typedef boost::graph_traits<Graph>::out_edge_iterator EdgeOutIter; |
|
Graph graph; |
|
LabelArr labels; |
|
|
|
// construct and use virtual faces for patch creation instead of actual mesh faces; |
|
// the virtual faces are composed of coplanar triangles sharing the same views |
|
if (bUseVirtualFaces) { |
|
|
|
// 1) create FaceToVirtualFaceMap |
|
FaceDataViewArr virtualFacesDatas; |
|
VirtualFaceIdxsArr virtualFaces; // stores each virtual face as an array of mesh face ID |
|
|
|
CreateAdaptiveVirtualFaces(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); |
|
|
|
// CreateVirtualFaces(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); |
|
Mesh::FaceIdxArr mapFaceToVirtualFace(faces.size()); // for each mesh face ID, store the virtual face ID which contains it |
|
size_t controlCounter(0); |
|
FOREACH(idxVF, virtualFaces) { |
|
const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; |
|
for (FIndex idxFace : vf) { |
|
mapFaceToVirtualFace[idxFace] = idxVF; |
|
++controlCounter; |
|
} |
|
} |
|
ASSERT(controlCounter == faces.size()); |
|
|
|
// 2) find the neighbors of each virtual face |
|
VirtualFaceIdxsArr virtualFaceNeighbors; { |
|
virtualFaceNeighbors.resize(virtualFaces.size()); |
|
FOREACH(idxVF, virtualFaces) { |
|
const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; |
|
Mesh::FaceIdxArr& vfNeighbors = virtualFaceNeighbors[idxVF]; |
|
|
|
printf("Processing virtual face %u/%u\n", (unsigned)idxVF, (unsigned)virtualFaces.size()); |
|
|
|
for (FIndex idxFace : vf) { |
|
const Mesh::FaceFaces& adjFaces = faceFaces[idxFace]; |
|
for (int i = 0; i < 3; ++i) { |
|
const FIndex fAdj(adjFaces[i]); |
|
// critical fix: add bounds checking |
|
if (fAdj == NO_ID) continue; |
|
if (fAdj >= mapFaceToVirtualFace.size()) { |
|
continue; |
|
} |
|
|
|
if (mapFaceToVirtualFace[fAdj] == idxVF) |
|
continue; |
|
|
|
if (fAdj != idxFace && vfNeighbors.Find(mapFaceToVirtualFace[fAdj]) == Mesh::FaceIdxArr::NO_INDEX) { |
|
vfNeighbors.emplace_back(mapFaceToVirtualFace[fAdj]); |
|
} |
|
} |
|
} |
|
} |
|
} |
|
|
|
// 3) use virtual faces to build the graph |
|
FOREACH(idxFace, virtualFaces) { |
|
MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); |
|
ASSERT(idx == idxFace); |
|
} |
|
|
|
FOREACH(idxVirtualFace, virtualFaces) { |
|
const Mesh::FaceIdxArr& afaces = virtualFaceNeighbors[idxVirtualFace]; |
|
for (FIndex idxVirtualFaceAdj: afaces) { |
|
if (idxVirtualFace >= idxVirtualFaceAdj) |
|
continue; |
|
|
|
// critical fix: add validity checks |
|
if (idxVirtualFace >= virtualFacesDatas.size() || |
|
idxVirtualFaceAdj >= virtualFacesDatas.size()) { |
|
printf("Skipping invalid virtual face pair: %u, %u\n", |
|
idxVirtualFace, idxVirtualFaceAdj); |
|
continue; |
|
} |
|
|
|
const bool bInvisibleFace(virtualFacesDatas[idxVirtualFace].empty()); |
|
const bool bInvisibleFaceAdj(virtualFacesDatas[idxVirtualFaceAdj].empty()); |
|
if (bInvisibleFace || bInvisibleFaceAdj) |
|
continue; |
|
|
|
boost::add_edge(idxVirtualFace, idxVirtualFaceAdj, graph); |
|
} |
|
} |
|
ASSERT((Mesh::FIndex)boost::num_vertices(graph) == virtualFaces.size()); |
|
|
|
// assign the best view to each face |
|
labels.resize(faces.size()); { |
|
// normalize quality values |
|
float maxQuality(0); |
|
for (const FaceDataArr& faceDatas: virtualFacesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
if (maxQuality < faceData.quality) |
|
maxQuality = faceData.quality; |
|
} |
|
|
|
Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); |
|
for (const FaceDataArr& faceDatas: virtualFacesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
hist.Add(faceData.quality); |
|
} |
|
// const float normQuality(hist.GetApproximatePermille(0.95f)); |
|
const float normQuality(hist.GetApproximatePermille(0.8f)); |
|
|
|
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP |
|
// initialize inference structures |
|
const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); |
|
LBPInference inference; { |
|
inference.SetNumNodes(virtualFaces.size()); |
|
inference.SetSmoothCost(SmoothnessPotts); |
|
EdgeOutIter ei, eie; |
|
FOREACH(f, virtualFaces) { |
|
for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { |
|
ASSERT(f == (FIndex)ei->m_source); |
|
const FIndex fAdj((FIndex)ei->m_target); |
|
if (f < fAdj) // add edges only once |
|
inference.SetNeighbors(f, fAdj); |
|
} |
|
// set costs for label 0 (undefined) |
|
inference.SetDataCost((Label)0, f, MaxEnergy); |
|
// inference.SetDataCost((Label)0, f, 0); |
|
} |
|
} |
|
|
|
FOREACH(f, virtualFacesDatas) { |
|
const FaceDataArr& faceDatas = virtualFacesDatas[f]; |
|
const size_t numViews = faceDatas.size(); |
|
const unsigned minSingleView = 2; // force single-view selection when the number of available views is <= 2 |
|
|
|
// when too few views are available, keep only the best view |
|
if (numViews <= minSingleView) { |
|
// if (true) { |
|
// find the highest-quality view |
|
float maxQuality = 0; |
|
IIndex bestView = NO_ID; |
|
for (const FaceData& fd : faceDatas) { |
|
if (fd.quality > maxQuality) { |
|
maxQuality = fd.quality; |
|
bestView = fd.idxView; |
|
} |
|
} |
|
// set the quality-based data term for the best view; the other views get the fixed cost below |
|
for (const FaceData& fd : faceDatas) { |
|
const Label label = (Label)fd.idxView + 1; |
|
// const float cost = (fd.idxView == bestView) ? |
|
// (1.f - fd.quality/normQuality) * MaxEnergy : |
|
// MaxEnergy; |
|
const float cost = (fd.idxView == bestView) ? |
|
(1.f - fd.quality/normQuality) * MaxEnergy : |
|
0; |
|
inference.SetDataCost(label, f, cost); |
|
} |
|
} |
|
else { |
|
for (const FaceData& faceData: faceDatas) { |
|
const Label label((Label)faceData.idxView+1); |
|
const float cost = (faceData.quality>=normQuality) ? |
|
(1.f - faceData.quality/normQuality) * MaxEnergy : |
|
0; |
|
inference.SetDataCost(label, f, cost); |
|
} |
|
} |
|
} |
|
|
|
// assign the optimal view (label) to each face |
|
// (label 0 is reserved as undefined) |
|
inference.Optimize(); |
|
|
|
// extract resulting labeling |
|
LabelArr virtualLabels(virtualFaces.size()); |
|
virtualLabels.Memset(0xFF); |
|
FOREACH(l, virtualLabels) { |
|
const Label label(inference.GetLabel(l)); |
|
ASSERT(label < images.size()+1); |
|
if (label > 0) |
|
virtualLabels[l] = label-1; |
|
} |
|
// add bounds checking in the label-propagation step |
|
FOREACH(l, labels) { |
|
const size_t virtualIdx = mapFaceToVirtualFace[l]; |
|
if (virtualIdx < virtualLabels.size()) { |
|
labels[l] = virtualLabels[virtualIdx]; |
|
} else { |
|
// handle the invalid-index case |
|
labels[l] = NO_ID; |
|
// printf("Warning: virtual face index %zu out of range (max %zu)\n", |
|
// virtualIdx, virtualLabels.size()-1); |
|
} |
|
} |
|
|
|
// add validity checks when adding the edges |
|
FOREACH(idxVirtualFace, virtualFaces) { |
|
const Mesh::FaceIdxArr& afaces = virtualFaceNeighbors[idxVirtualFace]; |
|
for (FIndex idxVirtualFaceAdj: afaces) { |
|
if (idxVirtualFace >= idxVirtualFaceAdj) |
|
continue; |
|
|
|
// add validity checks |
|
if (idxVirtualFace >= virtualFaces.size() || |
|
idxVirtualFaceAdj >= virtualFaces.size()) { |
|
// printf("Skipping invalid virtual face pair: %u, %u (max %zu)\n", |
|
// idxVirtualFace, idxVirtualFaceAdj, virtualFaces.size()-1); |
|
continue; |
|
} |
|
|
|
const bool bInvisibleFace(virtualFacesDatas[idxVirtualFace].empty()); |
|
const bool bInvisibleFaceAdj(virtualFacesDatas[idxVirtualFaceAdj].empty()); |
|
if (bInvisibleFace || bInvisibleFaceAdj) |
|
continue; |
|
|
|
boost::add_edge(idxVirtualFace, idxVirtualFaceAdj, graph); |
|
} |
|
} |
|
#endif |
|
} |
|
|
|
graph.clear(); |
|
} |
|
|
|
// create the graph of faces: each vertex is a face and the edges are the edges shared by the faces |
|
FOREACH(idxFace, faces) { |
|
MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); |
|
ASSERT(idx == idxFace); |
|
} |
|
FOREACH(idxFace, faces) { |
|
const Mesh::FaceFaces& afaces = faceFaces[idxFace]; |
|
for (int v=0; v<3; ++v) { |
|
const FIndex idxFaceAdj = afaces[v]; |
|
if (idxFaceAdj == NO_ID || idxFace >= idxFaceAdj) |
|
continue; |
|
const bool bInvisibleFace(facesDatas[idxFace].empty()); |
|
const bool bInvisibleFaceAdj(facesDatas[idxFaceAdj].empty()); |
|
if (bInvisibleFace || bInvisibleFaceAdj) { |
|
if (bInvisibleFace != bInvisibleFaceAdj) |
|
seamEdges.emplace_back(idxFace, idxFaceAdj); |
|
continue; |
|
} |
|
boost::add_edge(idxFace, idxFaceAdj, graph); |
|
} |
|
} |
|
faceFaces.Release(); |
|
ASSERT((Mesh::FIndex)boost::num_vertices(graph) == faces.size()); |
|
|
|
// LOG_OUT() << "bUseVirtualFaces=" << bUseVirtualFaces << std::endl; |
|
// start patch creation directly from individual faces |
|
if (!bUseVirtualFaces) { |
|
// assign the best view to each face |
|
labels.resize(faces.size()); { |
|
// normalize quality values |
|
float maxQuality(0); |
|
for (const FaceDataArr& faceDatas: facesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
if (maxQuality < faceData.quality) |
|
maxQuality = faceData.quality; |
|
} |
|
Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); |
|
for (const FaceDataArr& faceDatas: facesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
hist.Add(faceData.quality); |
|
} |
|
const float normQuality(hist.GetApproximatePermille(0.95f)); |
|
|
|
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP |
|
// initialize inference structures |
|
const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); |
|
LBPInference inference; { |
|
inference.SetNumNodes(faces.size()); |
|
inference.SetSmoothCost(SmoothnessPotts); |
|
EdgeOutIter ei, eie; |
|
FOREACH(f, faces) { |
|
for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { |
|
ASSERT(f == (FIndex)ei->m_source); |
|
const FIndex fAdj((FIndex)ei->m_target); |
|
if (f < fAdj) // add edges only once |
|
inference.SetNeighbors(f, fAdj); |
|
} |
|
// set costs for label 0 (undefined) |
|
inference.SetDataCost((Label)0, f, MaxEnergy); |
|
} |
|
} |
|
|
|
FOREACH(f, facesDatas) { |
|
const FaceDataArr& faceDatas = facesDatas[f]; |
|
const size_t numViews = faceDatas.size(); |
|
const unsigned minSingleView = 2; // force single-view selection when the number of available views is <= minSingleView |
|
|
|
// when too few views are available, keep only the best view |
|
if (numViews <= minSingleView) { |
|
// if (true) { |
|
// find the highest-quality view |
|
float maxQuality = 0; |
|
IIndex bestView = NO_ID; |
|
for (const FaceData& fd : faceDatas) { |
|
if (fd.quality > maxQuality) { |
|
maxQuality = fd.quality; |
|
bestView = fd.idxView; |
|
} |
|
} |
|
// set the quality-based data term for the best view; the other views get the fixed cost below |
|
for (const FaceData& fd : faceDatas) { |
|
const Label label = (Label)fd.idxView + 1; |
|
// const float cost = (fd.idxView == bestView) ? |
|
// (1.f - fd.quality/normQuality) * MaxEnergy : |
|
// MaxEnergy; |
|
const float cost = (fd.idxView == bestView) ? |
|
(1.f - fd.quality/normQuality) * MaxEnergy : |
|
0; |
|
inference.SetDataCost(label, f, cost); |
|
} |
|
} |
|
else { |
|
// handle the multi-view case normally |
|
for (const FaceData& faceData : faceDatas) { |
|
const Label label = (Label)faceData.idxView + 1; |
|
const float normalizedQuality = faceData.quality/normQuality; |
|
const float dataCost = (1.f - normalizedQuality) * MaxEnergy; |
|
inference.SetDataCost(label, f, dataCost); |
|
} |
|
} |
|
} |
|
|
|
// assign the optimal view (label) to each face |
|
// (label 0 is reserved as undefined) |
|
inference.Optimize(); |
|
|
|
// extract resulting labeling |
|
labels.Memset(0xFF); |
|
FOREACH(l, labels) { |
|
const Label label(inference.GetLabel(l)); |
|
ASSERT(label < images.size()+1); |
|
if (label > 0) |
|
labels[l] = label-1; |
|
} |
|
#endif |
|
|
|
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_TRWS |
|
// find connected components |
|
ASSERT((FIndex)boost::num_vertices(graph) == faces.size()); |
|
components.resize(faces.size()); |
|
const FIndex nComponents(boost::connected_components(graph, components.data())); |
|
|
|
// map face ID from global to component space |
|
typedef cList<NodeID, NodeID, 0, 128, NodeID> NodeIDs; |
|
NodeIDs nodeIDs(faces.size()); |
|
NodeIDs sizes(nComponents); |
|
sizes.Memset(0); |
|
FOREACH(c, components) |
|
nodeIDs[c] = sizes[components[c]]++; |
|
|
|
// initialize inference structures |
|
const LabelID numLabels(images.size()+1); |
|
CLISTDEFIDX(TRWSInference, FIndex) inferences(nComponents); |
|
FOREACH(s, sizes) { |
|
const NodeID numNodes(sizes[s]); |
|
ASSERT(numNodes > 0); |
|
if (numNodes <= 1) |
|
continue; |
|
TRWSInference& inference = inferences[s]; |
|
inference.Init(numNodes, numLabels); |
|
} |
|
|
|
// set data costs |
|
{ |
|
// add nodes |
|
CLISTDEF0(EnergyType) D(numLabels); |
|
FOREACH(f, facesDatas) { |
|
TRWSInference& inference = inferences[components[f]]; |
|
if (inference.IsEmpty()) |
|
continue; |
|
D.MemsetValue(MaxEnergy); |
|
const FaceDataArr& faceDatas = facesDatas[f]; |
|
for (const FaceData& faceData: faceDatas) { |
|
const Label label((Label)faceData.idxView); |
|
const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); |
|
const EnergyType dataCost(MaxEnergy*(1.f-normalizedQuality)); |
|
D[label] = dataCost; |
|
} |
|
const NodeID nodeID(nodeIDs[f]); |
|
inference.AddNode(nodeID, D.Begin()); |
|
} |
|
// add edges |
|
EdgeOutIter ei, eie; |
|
FOREACH(f, faces) { |
|
TRWSInference& inference = inferences[components[f]]; |
|
if (inference.IsEmpty()) |
|
continue; |
|
for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { |
|
ASSERT(f == (FIndex)ei->m_source); |
|
const FIndex fAdj((FIndex)ei->m_target); |
|
ASSERT(components[f] == components[fAdj]); |
|
if (f < fAdj) // add edges only once |
|
inference.AddEdge(nodeIDs[f], nodeIDs[fAdj]); |
|
} |
|
} |
|
} |
|
|
|
// assign the optimal view (label) to each face |
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp parallel for schedule(dynamic) |
|
for (int i=0; i<(int)inferences.size(); ++i) { |
|
#else |
|
FOREACH(i, inferences) { |
|
#endif |
|
TRWSInference& inference = inferences[i]; |
|
if (inference.IsEmpty()) |
|
continue; |
|
inference.Optimize(); |
|
} |
|
// extract resulting labeling |
|
labels.Memset(0xFF); |
|
FOREACH(l, labels) { |
|
TRWSInference& inference = inferences[components[l]]; |
|
if (inference.IsEmpty()) |
|
continue; |
|
const Label label(inference.GetLabel(nodeIDs[l])); |
|
ASSERT(label >= 0 && label < numLabels); |
|
if (label < images.size()) |
|
labels[l] = label; |
|
} |
|
#endif |
|
} |
|
} |
|
|
|
// create texture patches |
|
{ |
|
// divide graph in sub-graphs of connected faces having the same label |
|
EdgeIter ei, eie; |
|
const PairIdxArr::IDX startLabelSeamEdges(seamEdges.size()); |
|
for (boost::tie(ei, eie) = boost::edges(graph); ei != eie; ++ei) { |
|
const FIndex fSource((FIndex)ei->m_source); |
|
const FIndex fTarget((FIndex)ei->m_target); |
|
ASSERT(components.empty() || components[fSource] == components[fTarget]); |
|
if (labels[fSource] != labels[fTarget]) |
|
seamEdges.emplace_back(fSource, fTarget); |
|
} |
|
for (const PairIdx *pEdge=seamEdges.Begin()+startLabelSeamEdges, *pEdgeEnd=seamEdges.End(); pEdge!=pEdgeEnd; ++pEdge) |
|
boost::remove_edge(pEdge->i, pEdge->j, graph); |
|
|
|
// find connected components: texture patches |
|
ASSERT((FIndex)boost::num_vertices(graph) == faces.size()); |
|
components.resize(faces.size()); |
|
const FIndex nComponents(boost::connected_components(graph, components.data())); |
|
|
|
// create texture patches; |
|
// last texture patch contains all faces with no texture |
|
LabelArr sizes(nComponents); |
|
sizes.Memset(0); |
|
FOREACH(c, components) |
|
++sizes[components[c]]; |
|
texturePatches.resize(nComponents+1); |
|
texturePatches.back().label = NO_ID; |
|
FOREACH(f, faces) { |
|
const Label label(labels[f]); |
|
const FIndex c(components[f]); |
|
TexturePatch& texturePatch = texturePatches[c]; |
|
ASSERT(texturePatch.label == label || texturePatch.faces.empty()); |
|
if (label == NO_ID) { |
|
texturePatch.label = NO_ID; |
|
texturePatches.back().faces.Insert(f); |
|
} else { |
|
if (texturePatch.faces.empty()) { |
|
texturePatch.label = label; |
|
texturePatch.faces.reserve(sizes[c]); |
|
} |
|
texturePatch.faces.Insert(f); |
|
} |
|
} |
|
// remove all patches with invalid label (except the last one) |
|
// and create the map from the old index to the new one |
|
mapIdxPatch.resize(nComponents); |
|
std::iota(mapIdxPatch.Begin(), mapIdxPatch.End(), 0); |
|
for (FIndex t = nComponents; t-- > 0; ) { |
|
if (texturePatches[t].label == NO_ID) { |
|
texturePatches.RemoveAtMove(t); |
|
mapIdxPatch.RemoveAtMove(t); |
|
} |
|
} |
|
const unsigned numPatches(texturePatches.size()-1); |
|
uint32_t idxPatch(0); |
|
for (IndexArr::IDX i=0; i<mapIdxPatch.size(); ++i) { |
|
while (i < mapIdxPatch[i]) |
|
mapIdxPatch.InsertAt(i++, numPatches); |
|
mapIdxPatch[i] = idxPatch++; |
|
} |
|
while (mapIdxPatch.size() <= nComponents) |
|
mapIdxPatch.Insert(numPatches); |
|
} |
|
} |
|
|
|
return true; |
|
} |
|
|
|
void MeshTexture::CreateAdaptiveVirtualFaces( |
|
FaceDataViewArr& facesDatas, |
|
FaceDataViewArr& virtualFacesDatas, |
|
VirtualFaceIdxsArr& virtualFaces, |
|
unsigned minCommonCameras) |
|
{ |
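// Overview: virtual faces are grown by BFS region-growing over the face adjacency, |
// merging a neighbor whenever ShouldMergeVirtualFace() accepts it (based on the |
// cameras shared with the current virtual face); faces left unmerged become |
// single-face virtual faces, and the per-view data of merged faces is concatenated |
// and de-duplicated per camera further below |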
|
// === 1. initialize data structures === |
|
Mesh::FaceIdxArr mapFaceToVirtualFace(faces.size(), NO_ID); |
|
std::vector<bool> processed(faces.size(), false); |
|
std::vector<Mesh::FaceIdxArr> tmpVirtualFaces; |
|
|
|
// === 2. core merging logic (based on the shared-camera condition) === |
|
for (FIndex idxFace = 0; idxFace < faces.size(); ++idxFace) |
|
{ |
|
if (processed[idxFace]) continue; |
|
|
|
// create a new virtual face |
|
Mesh::FaceIdxArr newVirtualFace; |
|
newVirtualFace.emplace_back(idxFace); |
|
processed[idxFace] = true; |
|
|
|
// merge adjacent faces via breadth-first search |
|
std::queue<FIndex> faceQueue; |
|
faceQueue.push(idxFace); |
|
|
|
while (!faceQueue.empty()) |
|
{ |
|
FIndex current = faceQueue.front(); |
|
faceQueue.pop(); |
|
|
|
// visit the adjacent faces |
|
const Mesh::FaceFaces& adjFaces = faceFaces[current]; |
|
for (int i = 0; i < 3; ++i) |
|
{ |
|
FIndex neighbor = adjFaces[i]; |
|
if (neighbor == NO_ID || processed[neighbor]) continue; |
|
|
|
// key change: rely only on the shared-camera condition |
|
if (ShouldMergeVirtualFace(facesDatas, newVirtualFace, neighbor, minCommonCameras)) |
|
{ |
|
newVirtualFace.emplace_back(neighbor); |
|
processed[neighbor] = true; |
|
faceQueue.push(neighbor); |
|
} |
|
} |
|
} |
|
|
|
// keep virtual faces that merged more than one face |
|
if (newVirtualFace.size() > 1) |
|
{ |
|
tmpVirtualFaces.emplace_back(newVirtualFace); |
|
} |
|
else // a single face: restore its unprocessed state |
|
{ |
|
processed[idxFace] = false; |
|
} |
|
} |
|
|
|
// === 3. handle the remaining isolated faces === |
|
for (FIndex idxFace = 0; idxFace < faces.size(); ++idxFace) |
|
{ |
|
if (processed[idxFace] || mapFaceToVirtualFace[idxFace] != NO_ID) continue; |
|
|
|
// create a single-face virtual face |
|
const size_t newIdx = virtualFaces.size(); |
|
virtualFaces.emplace_back(Mesh::FaceIdxArr{idxFace}); |
|
|
|
// update the mapping and the per-face data |

mapFaceToVirtualFace[idxFace] = newIdx; |
|
virtualFacesDatas.emplace_back(facesDatas[idxFace]); |
|
} |
|
|
|
// === 4. consolidate the merge results === |
|
for (auto& vf : tmpVirtualFaces) |
|
{ |
|
// merge the per-view data of the member faces |
|
FaceDataArr mergedData; |
|
for (FIndex f : vf) |
|
{ |
|
for (auto& data : facesDatas[f]) |
|
{ |
|
mergedData.emplace_back(data); |
|
} |
|
} |
|
|
|
// de-duplicate the camera views |
|
std::sort(mergedData.begin(), mergedData.end(), |
|
[](const FaceData& a, const FaceData& b) { |
|
return a.idxView < b.idxView; |
|
}); |
|
auto last = std::unique(mergedData.begin(), mergedData.end(), |
|
[](const FaceData& a, const FaceData& b) { |
|
return a.idxView == b.idxView; |
|
}); |
|
// sort so that identical camera IDs are contiguous |
|
mergedData.Sort([](const FaceData& a, const FaceData& b) { |
|
return a.idxView < b.idxView; |
|
}); |
|
|
|
for (unsigned idx = 0; idx < mergedData.GetSize(); ) { |
|
if (idx + 1 < mergedData.GetSize() && |
|
mergedData[idx].idxView == mergedData[idx+1].idxView) |
|
{ |
|
mergedData.RemoveAt(idx+1); |
|
} else { |
|
idx++; // only advance the index when nothing was removed |
|
} |
|
} |
|
|
|
// store the result |
|
virtualFaces.emplace_back(vf); |
|
const size_t newIdx = virtualFaces.size()-1; // index of the virtual face just added |
|
for (FIndex f : vf) { |
|
mapFaceToVirtualFace[f] = newIdx; |
|
} |
|
|
|
virtualFacesDatas.emplace_back(mergedData); |
|
} |
|
|
|
} |
|
|
|
// === helper: check whether the merge condition is satisfied === |
|
bool MeshTexture::ShouldMergeVirtualFace( |
|
const MeshTexture::FaceDataViewArr& facesDatas, |
|
const Mesh::FaceIdxArr& currentVirtualFace, |
|
FIndex candidateFace, |
|
unsigned minCommonCameras) |
|
{ |
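// merge policy: accept the candidate as soon as it shares at least minCommonCameras |
// cameras with the current virtual face; otherwise require that every camera already |
// seeing the virtual face also observes the candidate through a view not flagged as |
// invalid (bInvalidFacesRelative) |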
|
// 1. collect the set of cameras seeing the current virtual face |
|
std::set<IIndex> currentCams; |
|
for (FIndex f : currentVirtualFace) |
|
{ |
|
for (const auto& data : facesDatas[f]) |
|
{ |
|
currentCams.insert(data.idxView); |
|
} |
|
} |
|
|
|
// 2. count the cameras the candidate face shares with the virtual face |
|
int commonCount = 0; |
|
for (const auto& data : facesDatas[candidateFace]) |
|
{ |
|
if (currentCams.find(data.idxView) != currentCams.end()) |
|
{ |
|
if (++commonCount >= minCommonCameras) |
|
{ |
|
return true; |
|
} |
|
} |
|
} |
|
|
|
for (IIndex view : currentCams) { |
|
bool valid = false; |
|
for (const FaceData& fd : facesDatas[candidateFace]) { |
|
if (fd.idxView == view && !fd.bInvalidFacesRelative) { |
|
valid = true; |
|
break; |
|
} |
|
} |
|
if (!valid) return false; // this camera has no valid view of the candidate |
|
} |
|
return true; |
|
|
|
// return false; |
|
} |
|
|
|
bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, int nIgnoreMaskLabel, const IIndexArr& views) |
|
{ |
|
// extract array of triangles incident to each vertex |
|
ListVertexFaces(); |
|
|
|
// create texture patches |
|
{ |
|
// printf("FaceViewSelection3 1 scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size()); |
|
// compute face normals and smoothen them |
|
scene.mesh.SmoothNormalFaces(); |
|
// printf("FaceViewSelection3 2 scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size()); |
|
|
|
bool bUseVirtualFaces(minCommonCameras > 0); |
|
|
|
// list all views for each face |
|
FaceDataViewArr facesDatas; |
|
if (!ListCameraFaces(facesDatas, fOutlierThreshold, nIgnoreMaskLabel, views, bUseVirtualFaces)) |
|
return false; |
|
|
|
// create faces graph |
|
typedef boost::adjacency_list<boost::vecS, boost::vecS, boost::undirectedS> Graph; |
|
typedef boost::graph_traits<Graph>::edge_iterator EdgeIter; |
|
typedef boost::graph_traits<Graph>::out_edge_iterator EdgeOutIter; |
|
Graph graph; |
|
LabelArr labels; |
|
|
|
// decide per face whether to use the virtual-face algorithm |

// Mesh::FaceIdxArr virtualFacesI; // faces handled by the virtual-face algorithm |

Mesh::FaceIdxArr perFaceFaces; // faces handled by the per-face algorithm |
|
|
|
/* |
|
bool bVirtualFacesSuccess = false; |
|
if (bUseVirtualFaces) |
|
{ |
|
// 1) create FaceToVirtualFaceMap |
|
FaceDataViewArr virtualFacesDatas; |
|
VirtualFaceIdxsArr virtualFaces; // stores each virtual face as an array of mesh face ID |
|
|
|
bVirtualFacesSuccess = CreateVirtualFaces6(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); |
|
if (!virtualFaces.empty()) |
|
{ |
|
bVirtualFacesSuccess = true; |
|
} |
|
if (!bVirtualFacesSuccess) { |
|
bUseVirtualFaces = false; |
|
DEBUG_EXTRA("Warning: Failed to create virtual faces. Falling back to per-face view selection."); |
|
} |
|
} |
|
*/ |
|
|
|
// construct and use virtual faces for patch creation instead of actual mesh faces; |
|
// the virtual faces are composed of coplanar triangles sharing the same views |
|
if (bUseVirtualFaces) { |
|
Mesh::FaceIdxArr mapFaceToVirtualFace(faces.size()); // for each mesh face ID, store the virtual face ID which contains it |
|
// mark the faces handled by the virtual-face algorithm |
|
std::vector<bool> isVirtualFace(faces.size(), true); |
|
// 1) create FaceToVirtualFaceMap |
|
FaceDataViewArr virtualFacesDatas; |
|
VirtualFaceIdxsArr virtualFaces; // stores each virtual face as an array of mesh face ID |
|
// CreateVirtualFaces(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); |
|
// CreateVirtualFaces3(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); |
|
// CreateVirtualFaces4(facesDatas, virtualFacesDatas, virtualFaces, mapFaceToVirtualFace, minCommonCameras); |
|
CreateVirtualFaces6(facesDatas, virtualFacesDatas, virtualFaces, isVirtualFace, minCommonCameras); |
|
TD_TIMER_STARTD(); |
|
// CreateVirtualFaces7(facesDatas, virtualFacesDatas, virtualFaces, isVirtualFace, minCommonCameras); |
|
DEBUG_EXTRA("CreateVirtualFaces7 completed: %s", TD_TIMER_GET_FMT().c_str()); |
|
|
|
size_t controlCounter(0); |
|
FOREACH(idxVF, virtualFaces) { |
|
const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; |
|
for (FIndex idxFace : vf) { |
|
mapFaceToVirtualFace[idxFace] = idxVF; |
|
// isVirtualFace[idxFace] = true; |
|
// virtualFacesI.push_back(idxFace); |
|
++controlCounter; |
|
} |
|
} |
|
|
|
// mark the faces handled by the per-face algorithm |
|
FOREACH(f, faces) { |
|
if (isVirtualFace[f]) { |
|
perFaceFaces.push_back(f); |
|
} |
|
} |
|
|
|
// printf("perFaceFaces.size = %d\n", perFaceFaces.size()); |
|
|
|
ASSERT(controlCounter == faces.size()); |
|
// 2) find the neighbors of each virtual face |
|
VirtualFaceIdxsArr virtualFaceNeighbors; |
|
{ // for each virtual face, the list of virtual faces with at least one vertex in common |
|
virtualFaceNeighbors.resize(virtualFaces.size()); |
|
//* |
|
FOREACH(idxVF, virtualFaces) { |
|
const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; |
|
Mesh::FaceIdxArr& vfNeighbors = virtualFaceNeighbors[idxVF]; |
|
for (FIndex idxFace : vf) { |
|
const Mesh::FaceFaces& adjFaces = faceFaces[idxFace]; |
|
for (int i = 0; i < 3; ++i) { |
|
const FIndex fAdj(adjFaces[i]); |
|
if (fAdj == NO_ID) |
|
continue; |
|
if (mapFaceToVirtualFace[fAdj] == idxVF) |
|
continue; |
|
if (fAdj != idxFace && vfNeighbors.Find(mapFaceToVirtualFace[fAdj]) == Mesh::FaceIdxArr::NO_INDEX) { |
|
vfNeighbors.emplace_back(mapFaceToVirtualFace[fAdj]); |
|
} |
|
} |
|
} |
|
} |
|
//*/ |
|
} |
|
// 3) use virtual faces to build the graph |
|
// 4) assign images to virtual faces |
|
// 5) spread image ID to each mesh face from virtual face |
|
FOREACH(idxFace, virtualFaces) { |
|
MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); |
|
ASSERT(idx == idxFace); |
|
} |
|
//* |
|
FOREACH(idxVirtualFace, virtualFaces) { |
|
const Mesh::FaceIdxArr& afaces = virtualFaceNeighbors[idxVirtualFace]; |
|
for (FIndex idxVirtualFaceAdj: afaces) { |
|
if (idxVirtualFace >= idxVirtualFaceAdj) |
|
continue; |
|
const bool bInvisibleFace(virtualFacesDatas[idxVirtualFace].empty()); |
|
const bool bInvisibleFaceAdj(virtualFacesDatas[idxVirtualFaceAdj].empty()); |
|
if (bInvisibleFace || bInvisibleFaceAdj) |
|
continue; |
|
boost::add_edge(idxVirtualFace, idxVirtualFaceAdj, graph); |
|
} |
|
} |
|
//*/ |
|
|
|
// here the depth maps are used to decide whether a virtual face should be marked invalid |
|
// CheckInvalidFaces(virtualFacesDatas, fOutlierThreshold, nIgnoreMaskLabel, views, bUseVirtualFaces)) |
|
|
|
printf("virtualFacesDatas.size()=%u, facesDatas.size()=%u\n", (unsigned)virtualFacesDatas.size(), (unsigned)facesDatas.size()); |
|
|
|
ASSERT((Mesh::FIndex)boost::num_vertices(graph) == virtualFaces.size()); |
|
// assign the best view to each face |
|
labels.resize(faces.size()); |
|
labelsInvalid.resize(faces.size()); |
|
|
|
FOREACH(l, labelsInvalid) { |
|
labelsInvalid[l] = NO_ID; |
|
} |
|
|
|
{ |
|
// normalize quality values |
|
float maxQuality(0); |
|
for (const FaceDataArr& faceDatas: virtualFacesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
if (maxQuality < faceData.quality) |
|
maxQuality = faceData.quality; |
|
} |
|
Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); |
|
for (const FaceDataArr& faceDatas: virtualFacesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
hist.Add(faceData.quality); |
|
} |
|
// const float normQuality(hist.GetApproximatePermille(0.95f)); |
|
const float normQuality(hist.GetApproximatePermille(0.8f)); |
|
|
|
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP |
|
// initialize inference structures |
|
const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); |
|
LBPInference inference; { |
|
inference.SetNumNodes(virtualFaces.size()); |
|
inference.SetSmoothCost(SmoothnessPotts); |
|
EdgeOutIter ei, eie; |
|
FOREACH(f, virtualFaces) { |
|
for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { |
|
ASSERT(f == (FIndex)ei->m_source); |
|
const FIndex fAdj((FIndex)ei->m_target); |
|
if (f < fAdj) // add edges only once |
|
inference.SetNeighbors(f, fAdj); |
|
} |
|
// set costs for label 0 (undefined) |
|
inference.SetDataCost((Label)0, f, MaxEnergy); |
|
// inference.SetDataCost((Label)0, f, 0); |
|
} |
|
} |
|
|
|
/* |
|
// set data costs for all labels (except label 0 - undefined) |
|
FOREACH(f, virtualFacesDatas) { |
|
const FaceDataArr& faceDatas = virtualFacesDatas[f]; |
|
for (const FaceData& faceData: faceDatas) { |
|
const Label label((Label)faceData.idxView+1); |
|
const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); |
|
const float dataCost((1.f-normalizedQuality)*MaxEnergy); |
|
inference.SetDataCost(label, f, dataCost); |
|
} |
|
} |
|
//*/ |
|
//* |
|
FOREACH(f, virtualFacesDatas) { |
|
const FaceDataArr& faceDatas = virtualFacesDatas[f]; |
|
const size_t numViews = faceDatas.size(); |
|
const unsigned minSingleView = 6; // use the ranked-view handling below when the number of available views is <= minSingleView |
|
|
|
bool bInvalidFacesRelative = false; |
|
IIndex invalidView; |
|
float invalidQuality; |
|
|
|
// when too few views are available, favor the best view |
|
if (numViews <= minSingleView) { |
|
// if (true) { |
|
std::vector<std::pair<float, IIndex>> sortedViews; |
|
std::vector<std::pair<float, Color>> sortedViews2; |
|
sortedViews.reserve(faceDatas.size()); |
|
sortedViews2.reserve(faceDatas.size()); |
|
for (const FaceData& fd : faceDatas) { |
|
|
|
if (fd.bInvalidFacesRelative) |
|
{ |
|
bInvalidFacesRelative = true; |
|
// sortedViews.emplace_back(fd.quality, fd.idxView); |
|
invalidView = fd.idxView; |
|
invalidQuality = fd.quality; |
|
|
|
// const Label label = (Label)fd.idxView + 1; |
|
// inference.SetDataCost(label, f, MaxEnergy); |
|
// sortedViews.emplace_back(fd.quality, fd.idxView); |
|
sortedViews2.emplace_back(fd.quality, fd.color); |
|
} |
|
else |
|
{ |
|
// if (fd.quality<=999.0) |
|
{ |
|
sortedViews.emplace_back(fd.quality, fd.idxView); |
|
sortedViews2.emplace_back(fd.quality, fd.color); |
|
// printf("1fd.quality=%f\n", fd.quality); |
|
} |
|
// else |
|
// printf("2fd.quality=%f\n", fd.quality); |
|
} |
|
} |
|
std::sort(sortedViews.begin(), sortedViews.end(), |
|
[](const auto& a, const auto& b) { return a.first > b.first; }); |
|
std::sort(sortedViews2.begin(), sortedViews2.end(), |
|
[](const auto& a, const auto& b) { return a.first > b.first; }); |
|
// set data costs: the best view gets the lowest cost, the others increase with their quality rank |

const float baseCostScale = 0.1f; // base cost scale factor |

const float costStep = 0.3f; // cost increment between consecutive ranks |
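// e.g. with baseCostScale=0.1 and costStep=0.3 the ranked views get data costs of |
// (1-quality/normQuality)*MaxEnergy for the best view, then 0.4*MaxEnergy, |
// 0.7*MaxEnergy and 1.0*MaxEnergy (clamped) for the 2nd, 3rd and 4th best, so |
// lower-ranked views are increasingly discouraged |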
|
|
|
if (bInvalidFacesRelative && sortedViews.size() == 0) |
|
// if (bInvalidFacesRelative) |
|
{ |
|
// const Label label = (Label)sortedViews[0].second + 1; |
|
const Label label = (Label)invalidView + 1; |
|
float cost = (1.f - invalidQuality / normQuality) * MaxEnergy; |
|
cost = 0; |
|
inference.SetDataCost(label, f, cost); |
|
continue; |
|
} |
|
|
|
/* |
|
int nSize = sortedViews2.size(); |
|
|
|
float totalQuality = 0.0f; |
|
Color totalColor(0,0,0); |
|
for (int n = 0; n < nSize; ++n) { |
|
totalQuality += sortedViews2[n].first; |
|
totalColor += sortedViews2[n].second; |
|
} |
|
const float avgQuality = totalQuality / nSize; |
|
const Color avgColor = totalColor / nSize; |
|
|
|
if (sortedViews2.size()<=0) |
|
continue; |
|
|
|
// printf("sortedViews2.size=%d\n", sortedViews2.size()); |
|
|
|
const Color medianColor = ComputeMedianColorAndQuality(sortedViews2).color; |
|
const float medianQuality = ComputeMedianColorAndQuality(sortedViews2).quality; |
|
//*/ |
|
for (size_t i = 0; i < sortedViews.size(); ++i) { |
|
const Label label = (Label)sortedViews[i].second + 1; |
|
float cost; |
|
// filter out views in which the face is not visible |
|
std::string strPath = images[label-1].name; |
|
size_t lastSlash = strPath.find_last_of("/\\"); |
|
if (lastSlash == std::string::npos) lastSlash = 0; // no separator: start from the beginning |

else lastSlash++; // skip the separator |
|
|
|
// find the position of the extension separator '.' |
|
size_t lastDot = strPath.find_last_of('.'); |
|
if (lastDot == std::string::npos) lastDot = strPath.size(); // no extension: take the rest of the string |
|
|
|
// extract the file name (without path and extension) |
|
std::string strName = strPath.substr(lastSlash, lastDot - lastSlash); |
|
// if (!scene.is_face_visible(strName.c_str(), f)) |
|
// continue; |
|
// filter out views in which the face is not visible |
|
|
|
/* |
|
const Color& viewColor = sortedViews2[i].second; |
|
// float colorDistance = cv::norm(avgColor - viewColor); |
|
// if (colorDistance>0.0001) |
|
// if (nSize>0) |
|
// printf("colorDistance=%f, nSize=%d, %f, %f, %f, %f, %f, %f\n", colorDistance, nSize, |
|
// avgColor.x, avgColor.y, avgColor.z, viewColor.x, viewColor.y, viewColor.z); |
|
// if (colorDistance>0.000) |
|
// continue; |
|
|
|
float colorDistance = cv::norm(viewColor - medianColor); |
|
|
|
if (colorDistance>0.0000) |
|
printf("colorDistance=%f, nSize=%d, i=%d, %f, %f, %f, %f, %f, %f\n", colorDistance, nSize, i, |
|
medianColor.x, medianColor.y, medianColor.z, viewColor.x, viewColor.y, viewColor.z); |
|
// float luminanceDistance = std::abs(viewLuminance - medianLuminance); |
|
if (colorDistance>10.0000) |
|
continue; |
|
//*/ |
|
|
|
if (i == 0) { |
|
// best view |
|
// cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy * baseCostScale; |
|
cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy; |
|
} else { |
|
// other views: the cost increases linearly with the rank |
|
int stepIndex = i; |
|
// if (i > 3) |
|
// stepIndex = i - 3; |
|
cost = MaxEnergy * (baseCostScale + costStep * stepIndex); |
|
// make sure the cost does not exceed MaxEnergy |
|
cost = std::min(cost, MaxEnergy); |
|
} |
|
|
|
inference.SetDataCost(label, f, cost); |
|
} |
|
|
|
// // find the highest-quality view |
|
// float maxQuality = 0; |
|
// IIndex bestView = NO_ID; |
|
// for (const FaceData& fd : faceDatas) { |
|
// if (fd.quality > maxQuality) { |
|
// maxQuality = fd.quality; |
|
// bestView = fd.idxView; |
|
// } |
|
// } |
|
// // set the data term only for the best view; all other views get MaxEnergy |
|
// for (const FaceData& fd : faceDatas) { |
|
// const Label label = (Label)fd.idxView + 1; |
|
// // const float cost = (fd.idxView == bestView) ? |
|
// // (1.f - fd.quality/normQuality) * MaxEnergy : |
|
// // MaxEnergy; |
|
// const float cost = (fd.idxView == bestView) ? |
|
// (1.f - fd.quality/normQuality) * MaxEnergy : |
|
// MaxEnergy; |
|
// inference.SetDataCost(label, f, cost); |
|
// } |
|
} |
|
else { |
|
/* |
|
for (const FaceData& faceData: faceDatas) { |
|
const Label label((Label)faceData.idxView+1); |
|
const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); |
|
// const float normalizedQuality = faceData.quality/normQuality; |
|
const float dataCost((1.f-normalizedQuality)*MaxEnergy); |
|
inference.SetDataCost(label, f, dataCost); |
|
} |
|
*/ |
|
for (const FaceData& faceData: faceDatas) { |
|
const Label label((Label)faceData.idxView+1); |
|
const float cost = (faceData.quality>=normQuality) ? |
|
(1.f - faceData.quality/normQuality) * MaxEnergy : |
|
0; |
|
inference.SetDataCost(label, f, cost); |
|
} |
|
} |
|
} |
|
//*/ |
|
|
|
// assign the optimal view (label) to each face |
|
// (label 0 is reserved as undefined) |
|
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP |
|
// log progress around the LBP optimization |
|
DEBUG_EXTRA("Starting LBP optimization with %d nodes", inference.GetNumNodes()); |
|
inference.Optimize(); |
|
DEBUG_EXTRA("LBP optimization finished"); |
|
#endif |
|
|
|
// extract resulting labeling |
|
LabelArr virtualLabels(virtualFaces.size()); |
|
virtualLabels.Memset(0xFF); |
|
FOREACH(l, virtualLabels) { |
|
const Label label(inference.GetLabel(l)); |
|
ASSERT(label < images.size()+1); |
|
if (label > 0) |
|
virtualLabels[l] = label-1; |
|
} |
|
/* |
|
FOREACH(l, labels) { |
|
labels[l] = virtualLabels[mapFaceToVirtualFace[l]]; |
|
} |
|
*/ |
|
|
|
/* |
|
if (!perFaceFaces.empty()) { |
|
FOREACH(f, perFaceFaces) { |
|
FaceDataArr& faceData = facesDatas[f]; |
|
if (faceData.empty()) continue; |
|
|
|
// pick the best view |
|
float bestQuality = -1; |
|
IIndex bestView = NO_ID; |
|
for (const FaceData& data : faceData) { |
|
if (data.quality > bestQuality) { |
|
bestQuality = data.quality; |
|
bestView = data.idxView; |
|
} |
|
} |
|
|
|
labels[f] = bestView; |
|
} |
|
} |
|
//*/ |
|
//* |
|
// safe version with bounds checking |
|
FOREACH(l, labels) { |
|
if (l < mapFaceToVirtualFace.size()) { |
|
const size_t virtualIdx = mapFaceToVirtualFace[l]; |
|
if (virtualIdx < virtualLabels.size()) { |
|
labels[l] = virtualLabels[virtualIdx]; |
|
} else { |
|
labels[l] = NO_ID; |
|
DEBUG_EXTRA("Warning: Invalid virtual face index for face %u: %u (max: %u)", |
|
l, virtualIdx, virtualLabels.size()-1); |
|
} |
|
} else { |
|
labels[l] = NO_ID; |
|
DEBUG_EXTRA("Warning: Face index out of bounds: %u (max: %u)", |
|
l, mapFaceToVirtualFace.size()-1); |
|
} |
|
} |
|
//*/ |
|
// alternative safe version (falls back to the best per-face view) |
|
/* |
|
FOREACH(l, labels) { |
|
if (l < mapFaceToVirtualFace.size()) { |
|
const size_t virtualIdx = mapFaceToVirtualFace[l]; |
|
if (virtualIdx < virtualLabels.size()) { |
|
labels[l] = virtualLabels[virtualIdx]; |
|
} else { |
|
// the virtual-face mapping failed; fall back to the per-face method: pick the best view |
|
const FaceDataArr& faceDatas = facesDatas[l]; |
|
if (!faceDatas.empty()) { |
|
// find the highest-quality view |
|
float maxQuality = -1; |
|
IIndex bestView = NO_ID; |
|
for (const FaceData& fd : faceDatas) { |
|
if (fd.quality > maxQuality && !fd.bInvalidFacesRelative) { |
|
maxQuality = fd.quality; |
|
bestView = fd.idxView; |
|
} |
|
} |
|
labels[l] = bestView; |
|
} else { |
|
labels[l] = NO_ID; |
|
} |
|
DEBUG_EXTRA("Warning: Invalid virtual face index for face %u: %u (max: %u) - using best view %u", |
|
l, virtualIdx, virtualLabels.size()-1, labels[l]); |
|
} |
|
} else { |
|
				// face index out of bounds; fall back to the per-face method as well
|
const FaceDataArr& faceDatas = facesDatas[l]; |
|
if (!faceDatas.empty()) { |
|
float maxQuality = -1; |
|
IIndex bestView = NO_ID; |
|
for (const FaceData& fd : faceDatas) { |
|
if (fd.quality > maxQuality && !fd.bInvalidFacesRelative) { |
|
maxQuality = fd.quality; |
|
bestView = fd.idxView; |
|
} |
|
} |
|
labels[l] = bestView; |
|
} else { |
|
labels[l] = NO_ID; |
|
} |
|
DEBUG_EXTRA("Warning: Face index out of bounds: %u (max: %u) - using best view %u", |
|
l, mapFaceToVirtualFace.size()-1, labels[l]); |
|
} |
|
} |
|
*/ |
|
#endif |
|
} |
|
|
|
graph.clear(); |
|
|
|
//* |
|
		// mark virtual-face boundaries as seam edges
|
FOREACH(idxVF, virtualFaces) { |
|
const auto& vf = virtualFaces[idxVF]; |
|
for (FIndex fid : vf) { |
|
const auto& adjFaces = faceFaces[fid]; |
|
for (int i=0; i<3; ++i) { |
|
if (adjFaces[i] == NO_ID) continue; |
|
const FIndex adjVF = mapFaceToVirtualFace[adjFaces[i]]; |
|
if (adjVF != idxVF) { |
|
seamEdges.emplace_back(fid, adjFaces[i]); |
|
} |
|
} |
|
} |
|
} |
|
//*/ |
|
} |
|
|
|
/* |
|
#if TEXOPT_USE_ANISOTROPIC |
|
	const int anisoLevel = 8; // anisotropic filtering level
|
for (auto& tex : textures) { |
|
tex.SetFilterMode(Texture::ANISOTROPIC); |
|
tex.SetAnisotropy(anisoLevel); |
|
} |
|
#endif |
|
//*/ |
|
|
|
// create the graph of faces: each vertex is a face and the edges are the edges shared by the faces |
|
FOREACH(idxFace, faces) { |
|
MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); |
|
ASSERT(idx == idxFace); |
|
} |
|
FOREACH(idxFace, faces) { |
|
const Mesh::FaceFaces& afaces = faceFaces[idxFace]; |
|
for (int v=0; v<3; ++v) { |
|
const FIndex idxFaceAdj = afaces[v]; |
|
if (idxFaceAdj == NO_ID || idxFace >= idxFaceAdj) |
|
continue; |
|
const bool bInvisibleFace(facesDatas[idxFace].empty()); |
|
const bool bInvisibleFaceAdj(facesDatas[idxFaceAdj].empty()); |
|
if (bInvisibleFace || bInvisibleFaceAdj) { |
|
if (bInvisibleFace != bInvisibleFaceAdj) |
|
seamEdges.emplace_back(idxFace, idxFaceAdj); |
|
continue; |
|
} |
|
boost::add_edge(idxFace, idxFaceAdj, graph); |
|
} |
|
} |
|
faceFaces.Release(); |
|
ASSERT((Mesh::FIndex)boost::num_vertices(graph) == faces.size()); |
|
|
|
LOG_OUT() << "bUseVirtualFaces=" << bUseVirtualFaces << std::endl; |
|
// start patch creation starting directly from individual faces |
|
|
|
if (bUseVirtualFaces) |
|
// if (false) |
|
{ |
|
// normalize quality values |
|
float maxQuality(0); |
|
/* |
|
for (const FaceDataArr& faceDatas: facesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
if (maxQuality < faceData.quality) |
|
maxQuality = faceData.quality; |
|
} |
|
*/ |
|
FOREACH(idxFace, facesDatas) { |
|
if (labels[idxFace] != NO_ID) |
|
continue; |
|
const FaceDataArr& faceDataArr = facesDatas[idxFace]; |
|
for (const FaceData& faceData : faceDataArr) { |
|
if (maxQuality < faceData.quality) |
|
maxQuality = faceData.quality; |
|
} |
|
} |
|
Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); |
|
/* |
|
for (const FaceDataArr& faceDatas: facesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
hist.Add(faceData.quality); |
|
} |
|
*/ |
|
FOREACH(idxFace, facesDatas) { |
|
if (labels[idxFace] != NO_ID) |
|
continue; |
|
const FaceDataArr& faceDataArr = facesDatas[idxFace]; |
|
for (const FaceData& faceData : faceDataArr) { |
|
hist.Add(faceData.quality); |
|
} |
|
} |
|
const float normQuality(hist.GetApproximatePermille(0.95f)); |
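			// normQuality: 95th-percentile of the remaining face qualities, used to normalize the data costs below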
|
|
|
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP |
|
// initialize inference structures |
|
const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); |
|
LBPInference inference; |
|
{ |
|
inference.SetNumNodes(faces.size()); |
|
inference.SetSmoothCost(SmoothnessPotts); |
|
// inference.SetSmoothCost(SmoothnessLinear); |
|
// inference.SetSmoothCost(NewSmoothness); |
|
|
|
EdgeOutIter ei, eie; |
|
FOREACH(f, faces) { |
|
for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { |
|
ASSERT(f == (FIndex)ei->m_source); |
|
const FIndex fAdj((FIndex)ei->m_target); |
|
if (f < fAdj) // add edges only once |
|
inference.SetNeighbors(f, fAdj); |
|
} |
|
// set costs for label 0 (undefined) |
|
inference.SetDataCost((Label)0, f, MaxEnergy); |
|
} |
|
} |
|
|
|
/* |
|
for (const FaceDataArr& faceDatas : facesDatas) { |
|
for (const FaceData& faceData : faceDatas) { |
|
if (faceData.quality > maxQuality) |
|
maxQuality = faceData.quality; |
|
} |
|
} |
|
for (const FaceDataArr& faceDatas : facesDatas) { |
|
for (const FaceData& faceData : faceDatas) |
|
hist.Add(faceData.quality); |
|
} |
|
*/ |
|
|
|
FOREACH(f, faces) { |
|
if (labels[f] != NO_ID) { |
|
const Label assignedLabel = (Label)(labels[f] + 1); |
|
inference.SetDataCost(assignedLabel, f, 0); |
|
} |
|
} |
|
|
|
FOREACH(f, facesDatas) |
|
{ |
|
if (labels[f] != NO_ID) |
|
continue; |
|
|
|
const FaceDataArr& faceDatas = facesDatas[f]; |
|
const size_t numViews = faceDatas.size(); |
|
				const unsigned minSingleView = 1; // same threshold as in the virtual-face mode
|
|
|
bool bInvalidFacesRelative = false; |
|
				IIndex invalidView(NO_ID);
|
				float invalidQuality(0);
|
|
|
{ |
|
|
|
std::vector<std::pair<float, IIndex>> sortedViews; |
|
sortedViews.reserve(faceDatas.size()); |
|
for (const FaceData& fd : faceDatas) |
|
{ |
|
|
|
if (fd.bInvalidFacesRelative) |
|
{ |
|
bInvalidFacesRelative = true; |
|
// sortedViews.emplace_back(fd.quality, fd.idxView); |
|
invalidView = fd.idxView; |
|
invalidQuality = fd.quality; |
|
} |
|
else |
|
{ |
|
// if (fd.quality<=999.0) |
|
{ |
|
sortedViews.emplace_back(fd.quality, fd.idxView); |
|
// printf("1fd.quality=%f\n", fd.quality); |
|
} |
|
// else |
|
// printf("2fd.quality=%f\n", fd.quality); |
|
} |
|
} |
|
|
|
std::sort(sortedViews.begin(), sortedViews.end(), |
|
[](const auto& a, const auto& b) { return a.first > b.first; }); |
|
					// set data costs: the best view gets the lowest cost, the others increase with their quality rank
|
					const float baseCostScale = 0.1f; // base cost scale factor
|
					const float costStep = 0.3f; // cost increment between consecutive ranks
|
|
|
for (const auto& image : images) |
|
{ |
|
// printf("image name=%s\n", image.name.c_str()); |
|
} |
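					// if this face is seen only by views flagged as invalid, fall back to that single
					// invalid view and give it a quality-based data cost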
|
|
|
if (bInvalidFacesRelative && sortedViews.size() == 0) |
|
{ |
|
// const Label label = (Label)sortedViews[0].second + 1; |
|
const Label label = (Label)invalidView + 1; |
|
float cost = (1.f - invalidQuality / normQuality) * MaxEnergy; |
|
// float cost = 0; |
|
inference.SetDataCost(label, f, cost); |
|
continue; |
|
} |
|
|
|
// printf("sortedViews size=%d\n", sortedViews.size()); |
|
for (size_t i = 0; i < sortedViews.size(); ++i) |
|
{ |
|
const Label label = (Label)sortedViews[i].second + 1; |
|
float cost; |
|
|
|
std::string strPath = images[label-1].name; |
|
size_t lastSlash = strPath.find_last_of("/\\"); |
|
						if (lastSlash == std::string::npos) lastSlash = 0; // no separator: start from the beginning
|
						else lastSlash++; // skip the separator
|
|
|
						// find the position of the extension separator '.'
|
size_t lastDot = strPath.find_last_of('.'); |
|
						if (lastDot == std::string::npos) lastDot = strPath.size(); // no extension: take the rest of the string
|
|
|
						// extract the file name (without path and extension)
|
std::string strName = strPath.substr(lastSlash, lastDot - lastSlash); |
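						// NOTE: strName appears unused here; it is computed only for debugging/inspection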
|
|
|
if (i == 0) { |
|
// if (true) { |
|
							// best view
|
// cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy * baseCostScale; |
|
cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy; |
|
// cost = 0; |
|
inference.SetDataCost(label, f, cost); |
|
} else { |
|
							// other views: cost increases linearly with rank
|
int stepIndex = i; |
|
// if (i > 3) |
|
// stepIndex = i - 3; |
|
cost = MaxEnergy * (baseCostScale + costStep * stepIndex); |
|
							// make sure the cost does not exceed MaxEnergy
|
cost = std::min(cost, MaxEnergy); |
|
// cost = MaxEnergy; |
|
inference.SetDataCost(label, f, cost); |
|
} |
|
} |
|
|
|
} |
|
} |
|
|
|
// assign the optimal view (label) to each face |
|
// (label 0 is reserved as undefined) |
|
// inference.Optimize(); |
|
|
|
// extract resulting labeling |
|
FOREACH(l, labels) |
|
{ |
|
if (labels[l] != NO_ID) |
|
continue; |
|
const Label label(inference.GetLabel(l)); |
|
ASSERT(label < images.size()+1); |
|
if (label > 0) |
|
{ |
|
labels[l] = label-1; |
|
labelsInvalid[l] = labels[l]; |
|
} |
|
} |
|
#endif |
|
} |
|
|
|
if (!bUseVirtualFaces) |
|
{ |
|
// assign the best view to each face |
|
labels.resize(faces.size()); |
|
{ |
|
// normalize quality values |
|
float maxQuality(0); |
|
for (const FaceDataArr& faceDatas: facesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
if (maxQuality < faceData.quality) |
|
maxQuality = faceData.quality; |
|
} |
|
Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); |
|
for (const FaceDataArr& faceDatas: facesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
hist.Add(faceData.quality); |
|
} |
|
const float normQuality(hist.GetApproximatePermille(0.95f)); |
|
|
|
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP |
|
// initialize inference structures |
|
const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); |
|
LBPInference inference; { |
|
inference.SetNumNodes(faces.size()); |
|
inference.SetSmoothCost(SmoothnessPotts); |
|
// inference.SetSmoothCost(SmoothnessLinear); |
|
// inference.SetSmoothCost(NewSmoothness); |
|
|
|
EdgeOutIter ei, eie; |
|
FOREACH(f, faces) { |
|
for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { |
|
ASSERT(f == (FIndex)ei->m_source); |
|
const FIndex fAdj((FIndex)ei->m_target); |
|
if (f < fAdj) // add edges only once |
|
inference.SetNeighbors(f, fAdj); |
|
} |
|
// set costs for label 0 (undefined) |
|
inference.SetDataCost((Label)0, f, MaxEnergy); |
|
} |
|
} |
|
|
|
//* |
|
for (const FaceDataArr& faceDatas : facesDatas) { |
|
for (const FaceData& faceData : faceDatas) { |
|
if (faceData.quality > maxQuality) |
|
maxQuality = faceData.quality; |
|
} |
|
} |
|
for (const FaceDataArr& faceDatas : facesDatas) { |
|
for (const FaceData& faceData : faceDatas) |
|
hist.Add(faceData.quality); |
|
} |
|
|
|
FOREACH(f, facesDatas) { |
|
// if (scene.mesh.invalidFacesRelative.data.contains(f)) |
|
// continue; |
|
|
|
const FaceDataArr& faceDatas = facesDatas[f]; |
|
const size_t numViews = faceDatas.size(); |
|
				const unsigned minSingleView = 6; // same threshold as in the virtual-face mode
|
|
|
bool bInvalidFacesRelative = false; |
|
				IIndex invalidView(NO_ID);
|
				float invalidQuality(0);
|
|
|
// if (numViews <= minSingleView) { |
|
if (true) { |
|
|
|
std::vector<std::pair<float, IIndex>> sortedViews; |
|
sortedViews.reserve(faceDatas.size()); |
|
for (const FaceData& fd : faceDatas) { |
|
|
|
if (fd.bInvalidFacesRelative) |
|
{ |
|
bInvalidFacesRelative = true; |
|
// sortedViews.emplace_back(fd.quality, fd.idxView); |
|
invalidView = fd.idxView; |
|
invalidQuality = fd.quality; |
|
} |
|
else |
|
{ |
|
// if (fd.quality<=999.0) |
|
{ |
|
sortedViews.emplace_back(fd.quality, fd.idxView); |
|
// printf("1fd.quality=%f\n", fd.quality); |
|
} |
|
// else |
|
// printf("2fd.quality=%f\n", fd.quality); |
|
} |
|
} |
|
|
|
std::sort(sortedViews.begin(), sortedViews.end(), |
|
[](const auto& a, const auto& b) { return a.first > b.first; }); |
|
					// set data costs: the best view gets the lowest cost, the others increase with their quality rank
|
					const float baseCostScale = 0.1f; // base cost scale factor
|
					const float costStep = 0.3f; // cost increment between consecutive ranks
|
|
|
for (const auto& image : images) |
|
{ |
|
// printf("image name=%s\n", image.name.c_str()); |
|
} |
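					// if this face is seen only by views flagged as invalid, fall back to that single
					// invalid view and give it a quality-based data cost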
|
|
|
if (bInvalidFacesRelative && sortedViews.size() == 0) |
|
{ |
|
// const Label label = (Label)sortedViews[0].second + 1; |
|
const Label label = (Label)invalidView + 1; |
|
float cost = (1.f - invalidQuality / normQuality) * MaxEnergy; |
|
// float cost = 0; |
|
inference.SetDataCost(label, f, cost); |
|
continue; |
|
} |
|
|
|
// printf("sortedViews size=%d\n", sortedViews.size()); |
|
for (size_t i = 0; i < sortedViews.size(); ++i) { |
|
const Label label = (Label)sortedViews[i].second + 1; |
|
float cost; |
|
|
|
std::string strPath = images[label-1].name; |
|
size_t lastSlash = strPath.find_last_of("/\\"); |
|
						if (lastSlash == std::string::npos) lastSlash = 0; // no separator: start from the beginning
|
						else lastSlash++; // skip the separator
|
|
|
						// find the position of the extension separator '.'
|
size_t lastDot = strPath.find_last_of('.'); |
|
						if (lastDot == std::string::npos) lastDot = strPath.size(); // no extension: take the rest of the string
|
|
|
						// extract the file name (without path and extension)
|
std::string strName = strPath.substr(lastSlash, lastDot - lastSlash); |
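						// NOTE: strName appears unused here; it is computed only for debugging/inspection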
|
|
|
if (i == 0) { |
|
// if (true) { |
|
							// best view
|
// cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy * baseCostScale; |
|
cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy; |
|
// cost = 0; |
|
inference.SetDataCost(label, f, cost); |
|
} else { |
|
							// other views: cost increases linearly with rank
|
int stepIndex = i; |
|
// if (i > 3) |
|
// stepIndex = i - 3; |
|
cost = MaxEnergy * (baseCostScale + costStep * stepIndex); |
|
							// make sure the cost does not exceed MaxEnergy
|
cost = std::min(cost, MaxEnergy); |
|
// cost = MaxEnergy; |
|
inference.SetDataCost(label, f, cost); |
|
} |
|
} |
|
|
|
} else { |
|
for (const FaceData& fd : faceDatas) { |
|
const Label label = (Label)fd.idxView + 1; |
|
const float normalizedQuality = fd.quality / normQuality; |
|
const float cost = (1.f - normalizedQuality) * MaxEnergy; |
|
inference.SetDataCost(label, f, cost); |
|
} |
|
} |
|
} |
|
//*/ |
|
|
|
// assign the optimal view (label) to each face |
|
// (label 0 is reserved as undefined) |
|
inference.Optimize(); |
|
|
|
// extract resulting labeling |
|
labels.Memset(0xFF); |
|
FOREACH(l, labels) { |
|
const Label label(inference.GetLabel(l)); |
|
ASSERT(label < images.size()+1); |
|
if (label > 0) |
|
labels[l] = label-1; |
|
} |
|
#endif |
|
|
|
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_TRWS |
|
// find connected components |
|
ASSERT((FIndex)boost::num_vertices(graph) == faces.size()); |
|
components.resize(faces.size()); |
|
const FIndex nComponents(boost::connected_components(graph, components.data())); |
|
|
|
// map face ID from global to component space |
|
typedef cList<NodeID, NodeID, 0, 128, NodeID> NodeIDs; |
|
NodeIDs nodeIDs(faces.size()); |
|
NodeIDs sizes(nComponents); |
|
sizes.Memset(0); |
|
FOREACH(c, components) |
|
nodeIDs[c] = sizes[components[c]]++; |
|
|
|
// initialize inference structures |
|
const LabelID numLabels(images.size()+1); |
|
CLISTDEFIDX(TRWSInference, FIndex) inferences(nComponents); |
|
FOREACH(s, sizes) { |
|
const NodeID numNodes(sizes[s]); |
|
ASSERT(numNodes > 0); |
|
if (numNodes <= 1) |
|
continue; |
|
TRWSInference& inference = inferences[s]; |
|
inference.Init(numNodes, numLabels); |
|
} |
|
|
|
// set data costs |
|
{ |
|
// add nodes |
|
CLISTDEF0(EnergyType) D(numLabels); |
|
FOREACH(f, facesDatas) { |
|
TRWSInference& inference = inferences[components[f]]; |
|
if (inference.IsEmpty()) |
|
continue; |
|
D.MemsetValue(MaxEnergy); |
|
const FaceDataArr& faceDatas = facesDatas[f]; |
|
for (const FaceData& faceData: faceDatas) { |
|
const Label label((Label)faceData.idxView); |
|
const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); |
|
const EnergyType dataCost(MaxEnergy*(1.f-normalizedQuality)); |
|
D[label] = dataCost; |
|
} |
|
const NodeID nodeID(nodeIDs[f]); |
|
inference.AddNode(nodeID, D.Begin()); |
|
} |
|
// add edges |
|
EdgeOutIter ei, eie; |
|
FOREACH(f, faces) { |
|
TRWSInference& inference = inferences[components[f]]; |
|
if (inference.IsEmpty()) |
|
continue; |
|
for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { |
|
ASSERT(f == (FIndex)ei->m_source); |
|
const FIndex fAdj((FIndex)ei->m_target); |
|
ASSERT(components[f] == components[fAdj]); |
|
if (f < fAdj) // add edges only once |
|
inference.AddEdge(nodeIDs[f], nodeIDs[fAdj]); |
|
} |
|
} |
|
} |
|
|
|
// assign the optimal view (label) to each face |
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp parallel for schedule(dynamic) |
|
for (int i=0; i<(int)inferences.size(); ++i) { |
|
#else |
|
FOREACH(i, inferences) { |
|
#endif |
|
TRWSInference& inference = inferences[i]; |
|
if (inference.IsEmpty()) |
|
continue; |
|
inference.Optimize(); |
|
} |
|
// extract resulting labeling |
|
labels.Memset(0xFF); |
|
FOREACH(l, labels) { |
|
TRWSInference& inference = inferences[components[l]]; |
|
if (inference.IsEmpty()) |
|
continue; |
|
const Label label(inference.GetLabel(nodeIDs[l])); |
|
ASSERT(label >= 0 && label < numLabels); |
|
if (label < images.size()) |
|
labels[l] = label; |
|
} |
|
#endif |
|
} |
|
} |
|
|
|
// create texture patches |
|
{ |
|
// divide graph in sub-graphs of connected faces having the same label |
|
EdgeIter ei, eie; |
|
const PairIdxArr::IDX startLabelSeamEdges(seamEdges.size()); |
|
for (boost::tie(ei, eie) = boost::edges(graph); ei != eie; ++ei) { |
|
const FIndex fSource((FIndex)ei->m_source); |
|
const FIndex fTarget((FIndex)ei->m_target); |
|
ASSERT(components.empty() || components[fSource] == components[fTarget]); |
|
if (labels[fSource] != labels[fTarget]) |
|
seamEdges.emplace_back(fSource, fTarget); |
|
} |
|
for (const PairIdx *pEdge=seamEdges.Begin()+startLabelSeamEdges, *pEdgeEnd=seamEdges.End(); pEdge!=pEdgeEnd; ++pEdge) |
|
boost::remove_edge(pEdge->i, pEdge->j, graph); |
|
|
|
// find connected components: texture patches |
|
ASSERT((FIndex)boost::num_vertices(graph) == faces.size()); |
|
components.resize(faces.size()); |
|
const FIndex nComponents(boost::connected_components(graph, components.data())); |
|
|
|
// create texture patches; |
|
// last texture patch contains all faces with no texture |
|
LabelArr sizes(nComponents); |
|
sizes.Memset(0); |
|
FOREACH(c, components) |
|
++sizes[components[c]]; |
|
texturePatches.resize(nComponents+1); |
|
texturePatches.back().label = NO_ID; |
|
FOREACH(f, faces) { |
|
const Label label(labels[f]); |
|
const FIndex c(components[f]); |
|
TexturePatch& texturePatch = texturePatches[c]; |
|
ASSERT(texturePatch.label == label || texturePatch.faces.empty()); |
|
if (label == NO_ID) { |
|
texturePatch.label = NO_ID; |
|
texturePatches.back().faces.Insert(f); |
|
} else { |
|
|
|
if ((labelsInvalid[f] != NO_ID) && false) |
|
{ |
|
if (texturePatch.faces.empty()) { |
|
texturePatch.label = label; |
|
// texturePatch.faces.reserve(sizes[c]); |
|
texturePatch.faces.reserve(sizes[c]); |
|
} |
|
texturePatch.faces = {f}; |
|
} |
|
else |
|
{ |
|
if (texturePatch.faces.empty()) { |
|
texturePatch.label = label; |
|
texturePatch.faces.reserve(sizes[c]); |
|
} |
|
texturePatch.faces.Insert(f); |
|
} |
|
} |
|
} |
|
// remove all patches with invalid label (except the last one) |
|
// and create the map from the old index to the new one |
|
mapIdxPatch.resize(nComponents); |
|
std::iota(mapIdxPatch.Begin(), mapIdxPatch.End(), 0); |
|
for (FIndex t = nComponents; t-- > 0; ) { |
|
if (texturePatches[t].label == NO_ID) { |
|
texturePatches.RemoveAtMove(t); |
|
mapIdxPatch.RemoveAtMove(t); |
|
} |
|
} |
|
const unsigned numPatches(texturePatches.size()-1); |
|
uint32_t idxPatch(0); |
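		// compact the index map: removed (invalid-label) patches are redirected to the last patch,
		// which collects all faces with no texture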
|
for (IndexArr::IDX i=0; i<mapIdxPatch.size(); ++i) { |
|
while (i < mapIdxPatch[i]) |
|
mapIdxPatch.InsertAt(i++, numPatches); |
|
mapIdxPatch[i] = idxPatch++; |
|
} |
|
while (mapIdxPatch.size() <= nComponents) |
|
mapIdxPatch.Insert(numPatches); |
|
} |
|
} |
|
return true; |
|
} |
|
|
|
bool MeshTexture::FaceViewSelection4(unsigned minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, int nIgnoreMaskLabel, const IIndexArr& views, const Mesh::FaceIdxArr* faceIndices)
|
{ |
|
	// determine the set of faces to process from the supplied face indices (all faces if none given)
|
Mesh::FaceIdxArr remainingFaces; |
|
if (faceIndices && !faceIndices->empty()) { |
|
remainingFaces = *faceIndices; |
|
} else { |
|
remainingFaces.resize(faces.size()); |
|
std::iota(remainingFaces.begin(), remainingFaces.end(), 0); |
|
} |
|
|
|
const bool bUseVirtualFaces(minCommonCameras > 0); |
|
// extract array of triangles incident to each vertex |
|
|
|
ListVertexFaces(); |
|
|
|
// create texture patches |
|
{ |
|
// printf("FaceViewSelection3 1 scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size()); |
|
// compute face normals and smoothen them |
|
scene.mesh.SmoothNormalFaces(); |
|
// printf("FaceViewSelection3 2 scene.mesh.vertices.size=%d\n", scene.mesh.vertices.size()); |
|
|
|
// list all views for each face |
|
FaceDataViewArr facesDatas; |
|
if (!ListCameraFaces(facesDatas, fOutlierThreshold, nIgnoreMaskLabel, views, bUseVirtualFaces)) |
|
return false; |
|
|
|
// create faces graph |
|
typedef boost::adjacency_list<boost::vecS, boost::vecS, boost::undirectedS> Graph; |
|
typedef boost::graph_traits<Graph>::edge_iterator EdgeIter; |
|
typedef boost::graph_traits<Graph>::out_edge_iterator EdgeOutIter; |
|
Graph graph; |
|
LabelArr labels; |
|
|
|
// construct and use virtual faces for patch creation instead of actual mesh faces; |
|
// the virtual faces are composed of coplanar triangles sharing same views |
|
if (bUseVirtualFaces) { |
|
|
|
		Mesh::FaceIdxArr mapFaceToVirtualFace(faces.size()); // for each mesh face ID, store the virtual face ID which contains it
|
|
|
// 1) create FaceToVirtualFaceMap |
|
FaceDataViewArr virtualFacesDatas; |
|
VirtualFaceIdxsArr virtualFaces; // stores each virtual face as an array of mesh face ID |
|
CreateVirtualFaces5(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras); |
|
size_t controlCounter(0); |
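		// map every mesh face to its containing virtual face and count the assignments for the sanity check below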
|
FOREACH(idxVF, virtualFaces) { |
|
const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; |
|
for (FIndex idxFace : vf) { |
|
mapFaceToVirtualFace[idxFace] = idxVF; |
|
++controlCounter; |
|
} |
|
} |
|
|
|
ASSERT(controlCounter == faces.size()); |
|
// 2) create function to find virtual faces neighbors |
|
VirtualFaceIdxsArr virtualFaceNeighbors; |
|
{ // for each virtual face, the list of virtual faces with at least one vertex in common |
|
virtualFaceNeighbors.resize(virtualFaces.size()); |
|
FOREACH(idxVF, virtualFaces) { |
|
const Mesh::FaceIdxArr& vf = virtualFaces[idxVF]; |
|
Mesh::FaceIdxArr& vfNeighbors = virtualFaceNeighbors[idxVF]; |
|
for (FIndex idxFace : vf) { |
|
const Mesh::FaceFaces& adjFaces = faceFaces[idxFace]; |
|
for (int i = 0; i < 3; ++i) { |
|
const FIndex fAdj(adjFaces[i]); |
|
if (fAdj == NO_ID) |
|
continue; |
|
if (mapFaceToVirtualFace[fAdj] == idxVF) |
|
continue; |
|
if (fAdj != idxFace && vfNeighbors.Find(mapFaceToVirtualFace[fAdj]) == Mesh::FaceIdxArr::NO_INDEX) { |
|
vfNeighbors.emplace_back(mapFaceToVirtualFace[fAdj]); |
|
} |
|
} |
|
} |
|
} |
|
} |
|
|
|
// 3) use virtual faces to build the graph |
|
FOREACH(idxFace, virtualFaces) { |
|
MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); |
|
ASSERT(idx == idxFace); |
|
} |
|
|
|
FOREACH(idxVirtualFace, virtualFaces) { |
|
const Mesh::FaceIdxArr& afaces = virtualFaceNeighbors[idxVirtualFace]; |
|
for (FIndex idxVirtualFaceAdj: afaces) { |
|
if (idxVirtualFace >= idxVirtualFaceAdj) |
|
continue; |
|
const bool bInvisibleFace(virtualFacesDatas[idxVirtualFace].empty()); |
|
const bool bInvisibleFaceAdj(virtualFacesDatas[idxVirtualFaceAdj].empty()); |
|
if (bInvisibleFace || bInvisibleFaceAdj) |
|
continue; |
|
boost::add_edge(idxVirtualFace, idxVirtualFaceAdj, graph); |
|
} |
|
} |
|
|
|
printf("virtualFacesDatas.size()=%d, facesDatas.size()=%d\n", virtualFacesDatas.size(), facesDatas.size()); |
|
|
|
ASSERT((Mesh::FIndex)boost::num_vertices(graph) == virtualFaces.size()); |
|
// assign the best view to each face |
|
labels.resize(faces.size()); { |
|
// normalize quality values |
|
float maxQuality(0); |
|
for (const FaceDataArr& faceDatas: virtualFacesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
if (maxQuality < faceData.quality) |
|
maxQuality = faceData.quality; |
|
} |
|
Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); |
|
for (const FaceDataArr& faceDatas: virtualFacesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
hist.Add(faceData.quality); |
|
} |
|
const float normQuality(hist.GetApproximatePermille(0.8f)); |
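			// note: the 80th-percentile quality is used here for normalization (the per-face paths above use the 95th)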
|
|
|
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP |
|
// initialize inference structures |
|
const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); |
|
LBPInference inference; { |
|
inference.SetNumNodes(virtualFaces.size()); |
|
inference.SetSmoothCost(SmoothnessPotts); |
|
EdgeOutIter ei, eie; |
|
FOREACH(f, virtualFaces) { |
|
for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { |
|
ASSERT(f == (FIndex)ei->m_source); |
|
const FIndex fAdj((FIndex)ei->m_target); |
|
if (f < fAdj) // add edges only once |
|
inference.SetNeighbors(f, fAdj); |
|
} |
|
// set costs for label 0 (undefined) |
|
inference.SetDataCost((Label)0, f, MaxEnergy); |
|
} |
|
} |
|
|
|
FOREACH(f, virtualFacesDatas) { |
|
const FaceDataArr& faceDatas = virtualFacesDatas[f]; |
|
const size_t numViews = faceDatas.size(); |
|
|
|
			// skip invalid faces (bInvalidFacesRelative == true)
|
bool hasValidView = false; |
|
for (const FaceData& fd : faceDatas) { |
|
if (!fd.bInvalidFacesRelative) { |
|
hasValidView = true; |
|
break; |
|
} |
|
} |
|
if (!hasValidView) { |
|
|
|
for (const FaceData& faceData: faceDatas) { |
|
					// skip invalid views
|
if (faceData.bInvalidFacesRelative) |
|
continue; |
|
|
|
const Label label((Label)faceData.idxView+1); |
|
const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); |
|
float dataCost((1.f-normalizedQuality)*MaxEnergy); |
|
dataCost = MaxEnergy; |
|
inference.SetDataCost(label, f, dataCost); |
|
} |
|
|
|
continue; |
|
} |
|
|
|
for (const FaceData& faceData: faceDatas) { |
|
				// skip invalid views
|
if (faceData.bInvalidFacesRelative) |
|
continue; |
|
|
|
const Label label((Label)faceData.idxView+1); |
|
const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); |
|
const float dataCost((1.f-normalizedQuality)*MaxEnergy); |
|
inference.SetDataCost(label, f, dataCost); |
|
} |
|
} |
|
|
|
// assign the optimal view (label) to each face |
|
inference.Optimize(); |
|
|
|
// extract resulting labeling |
|
LabelArr virtualLabels(virtualFaces.size()); |
|
virtualLabels.Memset(0xFF); |
|
FOREACH(l, virtualLabels) { |
|
const Label label(inference.GetLabel(l)); |
|
ASSERT(label < images.size()+1); |
|
if (label > 0) |
|
virtualLabels[l] = label-1; |
|
} |
|
|
|
		// safely map the virtual-face labels back to the mesh faces
|
FOREACH(l, labels) { |
|
if (l < mapFaceToVirtualFace.size()) { |
|
const size_t virtualIdx = mapFaceToVirtualFace[l]; |
|
if (virtualIdx < virtualLabels.size()) { |
|
labels[l] = virtualLabels[virtualIdx]; |
|
} else { |
|
labels[l] = NO_ID; |
|
} |
|
} else { |
|
labels[l] = NO_ID; |
|
} |
|
} |
|
#endif |
|
} |
|
|
|
graph.clear(); |
|
|
|
		// mark virtual-face boundaries as seam edges
|
FOREACH(idxVF, virtualFaces) { |
|
const auto& vf = virtualFaces[idxVF]; |
|
for (FIndex fid : vf) { |
|
const auto& adjFaces = faceFaces[fid]; |
|
for (int i=0; i<3; ++i) { |
|
if (adjFaces[i] == NO_ID) continue; |
|
const FIndex adjVF = mapFaceToVirtualFace[adjFaces[i]]; |
|
if (adjVF != idxVF) { |
|
seamEdges.emplace_back(fid, adjFaces[i]); |
|
} |
|
} |
|
} |
|
} |
|
} |
|
|
|
// create the graph of faces: each vertex is a face and the edges are the edges shared by the faces |
|
FOREACH(idxFace, faces) { |
|
MAYBEUNUSED const Mesh::FIndex idx((Mesh::FIndex)boost::add_vertex(graph)); |
|
ASSERT(idx == idxFace); |
|
} |
|
FOREACH(idxFace, faces) { |
|
const Mesh::FaceFaces& afaces = faceFaces[idxFace]; |
|
for (int v=0; v<3; ++v) { |
|
const FIndex idxFaceAdj = afaces[v]; |
|
if (idxFaceAdj == NO_ID || idxFace >= idxFaceAdj) |
|
continue; |
|
const bool bInvisibleFace(facesDatas[idxFace].empty()); |
|
const bool bInvisibleFaceAdj(facesDatas[idxFaceAdj].empty()); |
|
if (bInvisibleFace || bInvisibleFaceAdj) { |
|
if (bInvisibleFace != bInvisibleFaceAdj) |
|
seamEdges.emplace_back(idxFace, idxFaceAdj); |
|
continue; |
|
} |
|
boost::add_edge(idxFace, idxFaceAdj, graph); |
|
} |
|
} |
|
faceFaces.Release(); |
|
ASSERT((Mesh::FIndex)boost::num_vertices(graph) == faces.size()); |
|
|
|
// start patch creation starting directly from individual faces |
|
if (!bUseVirtualFaces) { |
|
// assign the best view to each face |
|
labels.resize(faces.size()); { |
|
// normalize quality values |
|
float maxQuality(0); |
|
for (const FaceDataArr& faceDatas: facesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
if (maxQuality < faceData.quality) |
|
maxQuality = faceData.quality; |
|
} |
|
Histogram32F hist(std::make_pair(0.f, maxQuality), 1000); |
|
for (const FaceDataArr& faceDatas: facesDatas) { |
|
for (const FaceData& faceData: faceDatas) |
|
hist.Add(faceData.quality); |
|
} |
|
const float normQuality(hist.GetApproximatePermille(0.95f)); |
|
|
|
#if TEXOPT_INFERENCE == TEXOPT_INFERENCE_LBP |
|
// initialize inference structures |
|
const LBPInference::EnergyType MaxEnergy(fRatioDataSmoothness*(LBPInference::EnergyType)LBPInference::MaxEnergy); |
|
LBPInference inference; { |
|
inference.SetNumNodes(faces.size()); |
|
inference.SetSmoothCost(SmoothnessPotts); |
|
EdgeOutIter ei, eie; |
|
FOREACH(f, faces) { |
|
for (boost::tie(ei, eie) = boost::out_edges(f, graph); ei != eie; ++ei) { |
|
ASSERT(f == (FIndex)ei->m_source); |
|
const FIndex fAdj((FIndex)ei->m_target); |
|
if (f < fAdj) // add edges only once |
|
inference.SetNeighbors(f, fAdj); |
|
} |
|
// set costs for label 0 (undefined) |
|
inference.SetDataCost((Label)0, f, MaxEnergy); |
|
} |
|
} |
|
|
|
FOREACH(f, facesDatas) { |
|
const FaceDataArr& faceDatas = facesDatas[f]; |
|
|
|
			// skip invalid faces (bInvalidFacesRelative == true)
|
bool hasValidView = false; |
|
for (const FaceData& fd : faceDatas) { |
|
if (!fd.bInvalidFacesRelative) { |
|
hasValidView = true; |
|
break; |
|
} |
|
} |
|
if (!hasValidView) { |
|
continue; |
|
} |
|
|
|
for (const FaceData& faceData: faceDatas) { |
|
				// skip invalid views
|
if (faceData.bInvalidFacesRelative) |
|
continue; |
|
|
|
const Label label((Label)faceData.idxView+1); |
|
const float normalizedQuality(faceData.quality>=normQuality ? 1.f : faceData.quality/normQuality); |
|
const float dataCost((1.f-normalizedQuality)*MaxEnergy); |
|
inference.SetDataCost(label, f, dataCost); |
|
} |
|
} |
|
|
|
// assign the optimal view (label) to each face |
|
inference.Optimize(); |
|
|
|
// extract resulting labeling |
|
labels.Memset(0xFF); |
|
FOREACH(l, labels) { |
|
const Label label(inference.GetLabel(l)); |
|
ASSERT(label < images.size()+1); |
|
if (label > 0) |
|
labels[l] = label-1; |
|
} |
|
#endif |
|
} |
|
} |
|
|
|
// create texture patches |
|
{ |
|
// divide graph in sub-graphs of connected faces having the same label |
|
EdgeIter ei, eie; |
|
const PairIdxArr::IDX startLabelSeamEdges(seamEdges.size()); |
|
for (boost::tie(ei, eie) = boost::edges(graph); ei != eie; ++ei) { |
|
const FIndex fSource((FIndex)ei->m_source); |
|
const FIndex fTarget((FIndex)ei->m_target); |
|
ASSERT(components.empty() || components[fSource] == components[fTarget]); |
|
if (labels[fSource] != labels[fTarget]) |
|
seamEdges.emplace_back(fSource, fTarget); |
|
} |
|
for (const PairIdx *pEdge=seamEdges.Begin()+startLabelSeamEdges, *pEdgeEnd=seamEdges.End(); pEdge!=pEdgeEnd; ++pEdge) |
|
boost::remove_edge(pEdge->i, pEdge->j, graph); |
|
|
|
// find connected components: texture patches |
|
ASSERT((Mesh::FIndex)boost::num_vertices(graph) == faces.size()); |
|
components.resize(faces.size()); |
|
const FIndex nComponents(boost::connected_components(graph, components.data())); |
|
|
|
// create texture patches; |
|
// last texture patch contains all faces with no texture |
|
LabelArr sizes(nComponents); |
|
sizes.Memset(0); |
|
FOREACH(c, components) |
|
++sizes[components[c]]; |
|
texturePatches.resize(nComponents+1); |
|
texturePatches.back().label = NO_ID; |
|
FOREACH(f, faces) { |
|
const Label label(labels[f]); |
|
const FIndex c(components[f]); |
|
TexturePatch& texturePatch = texturePatches[c]; |
|
ASSERT(texturePatch.label == label || texturePatch.faces.empty()); |
|
if (label == NO_ID) { |
|
texturePatch.label = NO_ID; |
|
texturePatches.back().faces.Insert(f); |
|
} else { |
|
if (texturePatch.faces.empty()) { |
|
texturePatch.label = label; |
|
texturePatch.faces.reserve(sizes[c]); |
|
} |
|
texturePatch.faces.Insert(f); |
|
} |
|
} |
|
// remove all patches with invalid label (except the last one) |
|
// and create the map from the old index to the new one |
|
mapIdxPatch.resize(nComponents); |
|
std::iota(mapIdxPatch.Begin(), mapIdxPatch.End(), 0); |
|
for (FIndex t = nComponents; t-- > 0; ) { |
|
if (texturePatches[t].label == NO_ID) { |
|
texturePatches.RemoveAtMove(t); |
|
mapIdxPatch.RemoveAtMove(t); |
|
} |
|
} |
|
const unsigned numPatches(texturePatches.size()-1); |
|
uint32_t idxPatch(0); |
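		// compact the index map: removed (invalid-label) patches are redirected to the last patch,
		// which collects all faces with no texture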
|
for (IndexArr::IDX i=0; i<mapIdxPatch.size(); ++i) { |
|
while (i < mapIdxPatch[i]) |
|
mapIdxPatch.InsertAt(i++, numPatches); |
|
mapIdxPatch[i] = idxPatch++; |
|
} |
|
while (mapIdxPatch.size() <= nComponents) |
|
mapIdxPatch.Insert(numPatches); |
|
} |
|
} |
|
return true; |
|
} |
|
|
|
// create seam vertices and edges |
|
void MeshTexture::CreateSeamVertices() |
|
{ |
|
// each vertex will contain the list of patches it separates, |
|
// except the patch containing invisible faces; |
|
// each patch contains the list of edges belonging to that texture patch, starting from that vertex |
|
// (usually there are pairs of edges in each patch, representing the two edges starting from that vertex separating two valid patches) |
|
VIndex vs[2]; |
|
uint32_t vs0[2], vs1[2]; |
|
std::unordered_map<VIndex, uint32_t> mapVertexSeam; |
|
const unsigned numPatches(texturePatches.size()-1); |
|
for (const PairIdx& edge: seamEdges) { |
|
// store edge for the later seam optimization |
|
ASSERT(edge.i < edge.j); |
|
|
|
// if (labelsInvalid[edge.i] != NO_ID || labelsInvalid[edge.j] != NO_ID ) |
|
// continue; |
|
|
|
const uint32_t idxPatch0(mapIdxPatch[components[edge.i]]); |
|
const uint32_t idxPatch1(mapIdxPatch[components[edge.j]]); |
|
ASSERT(idxPatch0 != idxPatch1 || idxPatch0 == numPatches); |
|
if (idxPatch0 == idxPatch1) |
|
continue; |
|
seamVertices.ReserveExtra(2); |
|
scene.mesh.GetEdgeVertices(edge.i, edge.j, vs0, vs1); |
|
ASSERT(faces[edge.i][vs0[0]] == faces[edge.j][vs1[0]]); |
|
ASSERT(faces[edge.i][vs0[1]] == faces[edge.j][vs1[1]]); |
|
vs[0] = faces[edge.i][vs0[0]]; |
|
vs[1] = faces[edge.i][vs0[1]]; |
|
|
|
const auto itSeamVertex0(mapVertexSeam.emplace(std::make_pair(vs[0], seamVertices.size()))); |
|
if (itSeamVertex0.second) |
|
seamVertices.emplace_back(vs[0]); |
|
SeamVertex& seamVertex0 = seamVertices[itSeamVertex0.first->second]; |
|
|
|
const auto itSeamVertex1(mapVertexSeam.emplace(std::make_pair(vs[1], seamVertices.size()))); |
|
if (itSeamVertex1.second) |
|
seamVertices.emplace_back(vs[1]); |
|
SeamVertex& seamVertex1 = seamVertices[itSeamVertex1.first->second]; |
|
|
|
if (idxPatch0 < numPatches) { |
|
const TexCoord offset0(texturePatches[idxPatch0].rect.tl()); |
|
SeamVertex::Patch& patch00 = seamVertex0.GetPatch(idxPatch0); |
|
SeamVertex::Patch& patch10 = seamVertex1.GetPatch(idxPatch0); |
|
ASSERT(patch00.edges.Find(itSeamVertex1.first->second) == NO_ID); |
|
patch00.edges.emplace_back(itSeamVertex1.first->second).idxFace = edge.i; |
|
patch00.proj = faceTexcoords[edge.i*3+vs0[0]]+offset0; |
|
ASSERT(patch10.edges.Find(itSeamVertex0.first->second) == NO_ID); |
|
patch10.edges.emplace_back(itSeamVertex0.first->second).idxFace = edge.i; |
|
patch10.proj = faceTexcoords[edge.i*3+vs0[1]]+offset0; |
|
} |
|
if (idxPatch1 < numPatches) { |
|
const TexCoord offset1(texturePatches[idxPatch1].rect.tl()); |
|
SeamVertex::Patch& patch01 = seamVertex0.GetPatch(idxPatch1); |
|
SeamVertex::Patch& patch11 = seamVertex1.GetPatch(idxPatch1); |
|
ASSERT(patch01.edges.Find(itSeamVertex1.first->second) == NO_ID); |
|
patch01.edges.emplace_back(itSeamVertex1.first->second).idxFace = edge.j; |
|
patch01.proj = faceTexcoords[edge.j*3+vs1[0]]+offset1; |
|
ASSERT(patch11.edges.Find(itSeamVertex0.first->second) == NO_ID); |
|
patch11.edges.emplace_back(itSeamVertex0.first->second).idxFace = edge.j; |
|
patch11.proj = faceTexcoords[edge.j*3+vs1[1]]+offset1; |
|
} |
|
} |
|
seamEdges.Release(); |
|
} |
|
|
|
// Native |
|
void MeshTexture::GlobalSeamLeveling3() |
|
{ |
|
ASSERT(!seamVertices.empty()); |
|
const unsigned numPatches(texturePatches.size()-1); |
|
|
|
// Create a boolean array to mark invalid vertices |
|
BoolArr vertexInvalid(vertices.size()); |
|
vertexInvalid.Memset(false); |
|
FOREACH(f, faces) { |
|
if (labelsInvalid[f] != NO_ID) { |
|
const Face& face = faces[f]; |
|
for (int v=0; v<3; ++v) |
|
vertexInvalid[face[v]] = true; |
|
} |
|
} |
|
|
|
// find the patch ID for each vertex |
|
PatchIndices patchIndices(vertices.size()); |
|
patchIndices.Memset(0); |
|
FOREACH(f, faces) { |
|
// if (labelsInvalid[f] != NO_ID) |
|
// continue; |
|
const uint32_t idxPatch(mapIdxPatch[components[f]]); |
|
const Face& face = faces[f]; |
|
for (int v=0; v<3; ++v) |
|
patchIndices[face[v]].idxPatch = idxPatch; |
|
} |
|
FOREACH(i, seamVertices) { |
|
const SeamVertex& seamVertex = seamVertices[i]; |
|
ASSERT(!seamVertex.patches.empty()); |
|
PatchIndex& patchIndex = patchIndices[seamVertex.idxVertex]; |
|
patchIndex.bIndex = true; |
|
patchIndex.idxSeamVertex = i; |
|
} |
|
// assign a row index within the solution vector x to each vertex/patch |
|
ASSERT(vertices.size() < static_cast<VIndex>(std::numeric_limits<MatIdx>::max())); |
|
MatIdx rowsX(0); |
|
typedef std::unordered_map<uint32_t,MatIdx> VertexPatch2RowMap; |
|
cList<VertexPatch2RowMap> vertpatch2rows(vertices.size()); |
|
FOREACH(i, vertices) { |
|
const PatchIndex& patchIndex = patchIndices[i]; |
|
VertexPatch2RowMap& vertpatch2row = vertpatch2rows[i]; |
|
if (patchIndex.bIndex) { |
|
// vertex is part of multiple patches |
|
const SeamVertex& seamVertex = seamVertices[patchIndex.idxSeamVertex]; |
|
ASSERT(seamVertex.idxVertex == i); |
|
for (const SeamVertex::Patch& patch: seamVertex.patches) { |
|
ASSERT(patch.idxPatch != numPatches); |
|
vertpatch2row[patch.idxPatch] = rowsX++; |
|
} |
|
} else |
|
if (patchIndex.idxPatch < numPatches) { |
|
// vertex is part of only one patch |
|
vertpatch2row[patchIndex.idxPatch] = rowsX++; |
|
} |
|
} |
|
// fill Tikhonov's Gamma matrix (regularization constraints) |
|
const float lambda(0.1f); |
|
MatIdx rowsGamma(0); |
|
Mesh::VertexIdxArr adjVerts; |
|
CLISTDEF0(MatEntry) rows(0, vertices.size()*4); |
|
FOREACH(v, vertices) { |
|
adjVerts.Empty(); |
|
scene.mesh.GetAdjVertices(v, adjVerts); |
|
VertexPatchIterator itV(patchIndices[v], seamVertices); |
|
while (itV.Next()) { |
|
const uint32_t idxPatch(itV); |
|
if (idxPatch == numPatches) |
|
continue; |
|
const MatIdx col(vertpatch2rows[v].at(idxPatch)); |
|
for (const VIndex vAdj: adjVerts) { |
|
if (v >= vAdj) |
|
continue; |
|
VertexPatchIterator itVAdj(patchIndices[vAdj], seamVertices); |
|
while (itVAdj.Next()) { |
|
const uint32_t idxPatchAdj(itVAdj); |
|
if (idxPatch == idxPatchAdj) { |
|
const MatIdx colAdj(vertpatch2rows[vAdj].at(idxPatchAdj)); |
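						// use a weaker regularization weight for edges touching vertices of invalid faces,
						// so their colors can be adjusted more freely (this supersedes the fixed lambda above)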
|
float currentLambda = (vertexInvalid[v] || vertexInvalid[vAdj]) ? 0.01f : 0.1f; |
|
// float currentLambda = 1.0f; |
|
rows.emplace_back(rowsGamma, col, currentLambda); |
|
rows.emplace_back(rowsGamma, colAdj, -currentLambda); |
|
++rowsGamma; |
|
} |
|
} |
|
} |
|
} |
|
} |
|
ASSERT(rows.size()/2 < static_cast<IDX>(std::numeric_limits<MatIdx>::max())); |
|
|
|
SparseMat Gamma(rowsGamma, rowsX); |
|
Gamma.setFromTriplets(rows.Begin(), rows.End()); |
|
rows.Empty(); |
|
|
|
// fill the matrix A and the coefficients for the Vector b of the linear equation system |
|
IndexArr indices; |
|
Colors vertexColors; |
|
Colors coeffB; |
|
for (const SeamVertex& seamVertex: seamVertices) { |
|
if (seamVertex.patches.size() < 2) |
|
continue; |
|
seamVertex.SortByPatchIndex(indices); |
|
vertexColors.resize(indices.size()); |
|
FOREACH(i, indices) { |
|
const SeamVertex::Patch& patch0 = seamVertex.patches[indices[i]]; |
|
ASSERT(patch0.idxPatch < numPatches); |
|
SampleImage sampler(images[texturePatches[patch0.idxPatch].label].image); |
|
for (const SeamVertex::Patch::Edge& edge: patch0.edges) { |
|
const SeamVertex& seamVertex1 = seamVertices[edge.idxSeamVertex]; |
|
const SeamVertex::Patches::IDX idxPatch1(seamVertex1.patches.Find(patch0.idxPatch)); |
|
ASSERT(idxPatch1 != SeamVertex::Patches::NO_INDEX); |
|
const SeamVertex::Patch& patch1 = seamVertex1.patches[idxPatch1]; |
|
sampler.AddEdge(patch0.proj, patch1.proj); |
|
} |
|
vertexColors[i] = sampler.GetColor(); |
|
} |
|
const VertexPatch2RowMap& vertpatch2row = vertpatch2rows[seamVertex.idxVertex]; |
|
for (IDX i=0; i<indices.size()-1; ++i) { |
|
const uint32_t idxPatch0(seamVertex.patches[indices[i]].idxPatch); |
|
const Color& color0 = vertexColors[i]; |
|
const MatIdx col0(vertpatch2row.at(idxPatch0)); |
|
for (IDX j=i+1; j<indices.size(); ++j) { |
|
const uint32_t idxPatch1(seamVertex.patches[indices[j]].idxPatch); |
|
const Color& color1 = vertexColors[j]; |
|
const MatIdx col1(vertpatch2row.at(idxPatch1)); |
|
ASSERT(idxPatch0 < idxPatch1); |
|
const MatIdx rowA((MatIdx)coeffB.size()); |
|
coeffB.Insert(color1 - color0); |
|
ASSERT(ISFINITE(coeffB.back())); |
|
rows.emplace_back(rowA, col0, 1.f); |
|
rows.emplace_back(rowA, col1, -1.f); |
|
} |
|
} |
|
} |
|
ASSERT(coeffB.size() < static_cast<IDX>(std::numeric_limits<MatIdx>::max())); |
|
|
|
const MatIdx rowsA((MatIdx)coeffB.size()); |
|
SparseMat A(rowsA, rowsX); |
|
A.setFromTriplets(rows.Begin(), rows.End()); |
|
rows.Release(); |
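	// normal equations of the stacked least-squares system [A; Gamma] x = [b; 0]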
|
|
|
SparseMat Lhs(A.transpose() * A + Gamma.transpose() * Gamma); |
|
// CG uses only the lower triangle, so prune the rest and compress matrix |
|
Lhs.prune([](const int& row, const int& col, const float&) -> bool { |
|
return col <= row; |
|
}); |
|
|
|
// globally solve for the correction colors |
|
Eigen::Matrix<float,Eigen::Dynamic,3,Eigen::RowMajor> colorAdjustments(rowsX, 3); |
|
{ |
|
// init CG solver |
|
Eigen::ConjugateGradient<SparseMat, Eigen::Lower> solver; |
|
solver.setMaxIterations(1000); |
|
solver.setTolerance(0.0001f); |
|
solver.compute(Lhs); |
|
ASSERT(solver.info() == Eigen::Success); |
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp parallel for |
|
#endif |
|
for (int channel=0; channel<3; ++channel) { |
|
// init right hand side vector |
|
const Eigen::Map< Eigen::VectorXf, Eigen::Unaligned, Eigen::Stride<0,3> > b(coeffB.front().ptr()+channel, rowsA); |
|
const Eigen::VectorXf Rhs(SparseMat(A.transpose()) * b); |
|
// solve for x |
|
const Eigen::VectorXf x(solver.solve(Rhs)); |
|
ASSERT(solver.info() == Eigen::Success); |
|
// subtract mean since the system is under-constrained and |
|
// we need the solution with minimal adjustments |
|
Eigen::Map< Eigen::VectorXf, Eigen::Unaligned, Eigen::Stride<0,3> >(colorAdjustments.data()+channel, rowsX) = x.array() - x.mean(); |
|
DEBUG_LEVEL(3, "\tcolor channel %d: %d iterations, %g residual", channel, solver.iterations(), solver.error()); |
|
} |
|
} |
|
|
|
// adjust texture patches using the correction colors |
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp parallel for schedule(dynamic) |
|
for (int i=0; i<(int)numPatches; ++i) { |
|
#else |
|
for (unsigned i=0; i<numPatches; ++i) { |
|
#endif |
|
const uint32_t idxPatch((uint32_t)i); |
|
TexturePatch& texturePatch = texturePatches[idxPatch]; |
|
ColorMap imageAdj(texturePatch.rect.size()); |
|
imageAdj.memset(0); |
|
// interpolate color adjustments over the whole patch |
|
struct RasterPatch { |
|
const TexCoord* tri; |
|
Color colors[3]; |
|
ColorMap& image; |
|
inline RasterPatch(ColorMap& _image) : image(_image) {} |
|
inline cv::Size Size() const { return image.size(); } |
|
inline void operator()(const ImageRef& pt, const Point3f& bary) { |
|
ASSERT(image.isInside(pt)); |
|
image(pt) = colors[0]*bary.x + colors[1]*bary.y + colors[2]*bary.z; |
|
} |
|
} data(imageAdj); |
|
for (const FIndex idxFace: texturePatch.faces) { |
|
const Face& face = faces[idxFace]; |
|
data.tri = faceTexcoords.Begin()+idxFace*3; |
|
for (int v=0; v<3; ++v){ |
|
// printf("vertpatch2rows[face[v]].size(): %zu\n", vertpatch2rows[face[v]].size()); |
|
// printf("idxPatch: %u\n", idxPatch); |
|
// auto print_key_value = [](const auto& key, const auto& value) |
|
// { |
|
// std::cout << "Key:[" << key << "] Value:[" << value << "]\n"; |
|
// }; |
|
// for (const auto& [key, value] : vertpatch2rows[face[v]]) |
|
// print_key_value(key, value); |
|
|
|
if (auto search = vertpatch2rows[face[v]].find(idxPatch); search != vertpatch2rows[face[v]].end()) |
|
data.colors[v] = colorAdjustments.row(vertpatch2rows[face[v]].at(idxPatch)); |
|
} |
|
// render triangle and for each pixel interpolate the color adjustment |
|
// from the triangle corners using barycentric coordinates |
|
ColorMap::RasterizeTriangleBary(data.tri[0], data.tri[1], data.tri[2], data); |
|
} |
|
// dilate with one pixel width, in order to make sure patch border smooths out a little |
|
imageAdj.DilateMean<1>(imageAdj, Color::ZERO); |
|
// apply color correction to the patch image |
|
cv::Mat image(images[texturePatch.label].image(texturePatch.rect)); |
|
for (int r=0; r<image.rows; ++r) { |
|
for (int c=0; c<image.cols; ++c) { |
|
const Color& a = imageAdj(r,c); |
|
if (a == Color::ZERO) |
|
continue; |
|
Pixel8U& v = image.at<Pixel8U>(r,c); |
|
const Color col(RGB2YCBCR(Color(v))); |
|
const Color acol(YCBCR2RGB(Color(col+a))); |
|
for (int p=0; p<3; ++p) |
|
v[p] = (uint8_t)CLAMP(ROUND2INT(acol[p]), 0, 255); |
|
} |
|
} |
|
} |
|
} |
|
|
|
// set to one in order to dilate also on the diagonal of the border |
|
// (normally not needed) |
|
#define DILATE_EXTRA 0 |
|
void MeshTexture::ProcessMask(Image8U& mask, int stripWidth) |
|
{ |
|
typedef Image8U::Type Type; |
|
|
|
// dilate and erode around the border, |
|
// in order to fill all gaps and remove outside pixels |
|
// (due to imperfect overlay of the raster line border and raster faces) |
|
#define DILATEDIR(rd,cd) { \ |
|
Type& vi = mask(r+(rd),c+(cd)); \ |
|
if (vi != border) \ |
|
vi = interior; \ |
|
} |
|
const int HalfSize(1); |
|
const int RowsEnd(mask.rows-HalfSize); |
|
const int ColsEnd(mask.cols-HalfSize); |
|
/* |
|
float depthThreshold = 0.1f; |
|
for (int r=0; r<mask.rows; ++r) { |
|
for (int c=0; c<mask.cols; ++c) { |
|
if (depthMap(r,c) > depthThreshold) { |
|
mask(r,c) = empty; |
|
} |
|
} |
|
} |
|
//*/ |
|
|
|
for (int r=HalfSize; r<RowsEnd; ++r) { |
|
for (int c=HalfSize; c<ColsEnd; ++c) { |
|
const Type v(mask(r,c)); |
|
if (v != border) |
|
continue; |
|
#if DILATE_EXTRA |
|
for (int i=-HalfSize; i<=HalfSize; ++i) { |
|
const int rw(r+i); |
|
for (int j=-HalfSize; j<=HalfSize; ++j) { |
|
const int cw(c+j); |
|
Type& vi = mask(rw,cw); |
|
if (vi != border) |
|
vi = interior; |
|
} |
|
} |
|
#else |
|
DILATEDIR(-1, 0); |
|
DILATEDIR(1, 0); |
|
DILATEDIR(0, -1); |
|
DILATEDIR(0, 1); |
|
#endif |
|
} |
|
} |
|
#undef DILATEDIR |
|
#define ERODEDIR(rd,cd) { \ |
|
const int rl(r-(rd)), cl(c-(cd)), rr(r+(rd)), cr(c+(cd)); \ |
|
const Type vl(mask.isInside(ImageRef(cl,rl)) ? mask(rl,cl) : uint8_t(empty)); \ |
|
const Type vr(mask.isInside(ImageRef(cr,rr)) ? mask(rr,cr) : uint8_t(empty)); \ |
|
if ((vl == border && vr == empty) || (vr == border && vl == empty)) { \ |
|
v = empty; \ |
|
continue; \ |
|
} \ |
|
} |
|
#if DILATE_EXTRA |
|
for (int i=0; i<2; ++i) |
|
#endif |
|
for (int r=0; r<mask.rows; ++r) { |
|
for (int c=0; c<mask.cols; ++c) { |
|
Type& v = mask(r,c); |
|
if (v != interior) |
|
continue; |
|
ERODEDIR(0, 1); |
|
ERODEDIR(1, 0); |
|
ERODEDIR(1, 1); |
|
ERODEDIR(-1, 1); |
|
} |
|
} |
|
#undef ERODEDIR |
|
|
|
// mark all interior pixels with empty neighbors as border |
|
for (int r=0; r<mask.rows; ++r) { |
|
for (int c=0; c<mask.cols; ++c) { |
|
Type& v = mask(r,c); |
|
if (v != interior) |
|
continue; |
|
if (mask(r-1,c) == empty || |
|
mask(r,c-1) == empty || |
|
mask(r+1,c) == empty || |
|
mask(r,c+1) == empty) |
|
v = border; |
|
} |
|
} |
|
|
|
#if 0 |
|
// mark all interior pixels with border neighbors on two sides as border |
|
{ |
|
Image8U orgMask; |
|
mask.copyTo(orgMask); |
|
for (int r=0; r<mask.rows; ++r) { |
|
for (int c=0; c<mask.cols; ++c) { |
|
Type& v = mask(r,c); |
|
if (v != interior) |
|
continue; |
|
if ((orgMask(r+1,c+0) == border && orgMask(r+0,c+1) == border) || |
|
(orgMask(r+1,c+0) == border && orgMask(r-0,c-1) == border) || |
|
(orgMask(r-1,c-0) == border && orgMask(r+0,c+1) == border) || |
|
(orgMask(r-1,c-0) == border && orgMask(r-0,c-1) == border)) |
|
v = border; |
|
} |
|
} |
|
} |
|
#endif |
|
|
|
// compute the set of valid pixels at the border of the texture patch |
|
#define ISEMPTY(mask, x,y) (mask(y,x) == empty) |
|
const int width(mask.width()), height(mask.height()); |
|
typedef std::unordered_set<ImageRef> PixelSet; |
|
PixelSet borderPixels; |
|
for (int y=0; y<height; ++y) { |
|
for (int x=0; x<width; ++x) { |
|
if (ISEMPTY(mask, x,y)) |
|
continue; |
|
// valid border pixels need no invalid neighbors |
|
if (x == 0 || x == width - 1 || y == 0 || y == height - 1) { |
|
borderPixels.insert(ImageRef(x,y)); |
|
continue; |
|
} |
|
// check the direct neighborhood of all invalid pixels |
|
for (int j=-1; j<=1; ++j) { |
|
for (int i=-1; i<=1; ++i) { |
|
// if the valid pixel has an invalid neighbor... |
|
const int xn(x+i), yn(y+j); |
|
if (ISINSIDE(xn, 0, width) && |
|
ISINSIDE(yn, 0, height) && |
|
ISEMPTY(mask, xn,yn)) { |
|
// add the pixel to the set of valid border pixels |
|
borderPixels.insert(ImageRef(x,y)); |
|
goto CONTINUELOOP; |
|
} |
|
} |
|
} |
|
CONTINUELOOP:; |
|
} |
|
} |
|
|
|
// iteratively erode all border pixels |
|
{ |
|
Image8U orgMask; |
|
mask.copyTo(orgMask); |
|
typedef std::vector<ImageRef> PixelVector; |
|
for (int s=0; s<stripWidth; ++s) { |
|
PixelVector emptyPixels(borderPixels.begin(), borderPixels.end()); |
|
borderPixels.clear(); |
|
// mark the new empty pixels as empty in the mask |
|
for (PixelVector::const_iterator it=emptyPixels.cbegin(); it!=emptyPixels.cend(); ++it) |
|
orgMask(*it) = empty; |
|
// find the set of valid pixels at the border of the valid area |
|
for (PixelVector::const_iterator it=emptyPixels.cbegin(); it!=emptyPixels.cend(); ++it) { |
|
for (int j=-1; j<=1; j++) { |
|
for (int i=-1; i<=1; i++) { |
|
const int xn(it->x+i), yn(it->y+j); |
|
if (ISINSIDE(xn, 0, width) && |
|
ISINSIDE(yn, 0, height) && |
|
!ISEMPTY(orgMask, xn, yn)) |
|
borderPixels.insert(ImageRef(xn,yn)); |
|
} |
|
} |
|
} |
|
} |
|
#undef ISEMPTY |
|
|
|
// mark all remaining pixels empty in the mask |
|
for (int y=0; y<height; ++y) { |
|
for (int x=0; x<width; ++x) { |
|
if (orgMask(y,x) != empty) |
|
mask(y,x) = empty; |
|
} |
|
} |
|
} |
|
|
|
// mark all border pixels |
|
for (PixelSet::const_iterator it=borderPixels.cbegin(); it!=borderPixels.cend(); ++it) |
|
mask(*it) = border; |
|
|
|
#if 0 |
|
// dilate border |
|
{ |
|
Image8U orgMask; |
|
mask.copyTo(orgMask); |
|
for (int r=HalfSize; r<RowsEnd; ++r) { |
|
for (int c=HalfSize; c<ColsEnd; ++c) { |
|
const Type v(orgMask(r, c)); |
|
if (v != border) |
|
continue; |
|
for (int i=-HalfSize; i<=HalfSize; ++i) { |
|
const int rw(r+i); |
|
for (int j=-HalfSize; j<=HalfSize; ++j) { |
|
const int cw(c+j); |
|
Type& vi = mask(rw, cw); |
|
if (vi == empty) |
|
vi = border; |
|
} |
|
} |
|
} |
|
} |
|
} |
|
#endif |
|
} |
|
|
|
inline MeshTexture::Color ColorLaplacian(const Image32F3& img, int i) { |
|
const int width(img.width()); |
|
return img(i-width) + img(i-1) + img(i+1) + img(i+width) - img(i)*4.f; |
|
} |
|
|
|
void MeshTexture::PoissonBlending(const Image32F3& src, Image32F3& dst, const Image8U& mask, float bias) |
|
{ |
|
ASSERT(src.width() == mask.width() && src.width() == dst.width()); |
|
ASSERT(src.height() == mask.height() && src.height() == dst.height()); |
|
ASSERT(src.channels() == 3 && dst.channels() == 3 && mask.channels() == 1); |
|
ASSERT(src.type() == CV_32FC3 && dst.type() == CV_32FC3 && mask.type() == CV_8U); |
|
|
|
#ifndef _RELEASE |
|
// check the mask border has no pixels marked as interior |
|
for (int x=0; x<mask.cols; ++x) |
|
ASSERT(mask(0,x) != interior && mask(mask.rows-1,x) != interior); |
|
for (int y=0; y<mask.rows; ++y) |
|
ASSERT(mask(y,0) != interior && mask(y,mask.cols-1) != interior); |
|
#endif |
|
|
|
const int n(dst.area()); |
|
const int width(dst.width()); |
|
|
|
TImage<MatIdx> indices(dst.size()); |
|
indices.memset(0xff); |
|
MatIdx nnz(0); |
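	// assign a sequential unknown index to every non-empty (border or interior) pixel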
|
for (int i = 0; i < n; ++i) |
|
if (mask(i) != empty) |
|
indices(i) = nnz++; |
|
if (nnz <= 0) |
|
return; |
|
|
|
Colors coeffB(nnz); |
|
CLISTDEF0(MatEntry) coeffA(0, nnz); |
|
for (int i = 0; i < n; ++i) { |
|
switch (mask(i)) { |
|
case border: { |
|
const MatIdx idx(indices(i)); |
|
ASSERT(idx != -1); |
|
coeffA.emplace_back(idx, idx, 1.f); |
|
coeffB[idx] = (const Color&)dst(i); |
|
} break; |
|
case interior: { |
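			// interior pixel: one row of the discrete Poisson equation using the 5-point Laplacian stencil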
|
const MatIdx idxUp(indices(i - width)); |
|
const MatIdx idxLeft(indices(i - 1)); |
|
const MatIdx idxCenter(indices(i)); |
|
const MatIdx idxRight(indices(i + 1)); |
|
const MatIdx idxDown(indices(i + width)); |
|
// all indices should be either border conditions or part of the optimization |
|
ASSERT(idxUp != -1 && idxLeft != -1 && idxCenter != -1 && idxRight != -1 && idxDown != -1); |
|
coeffA.emplace_back(idxCenter, idxUp, 1.f); |
|
coeffA.emplace_back(idxCenter, idxLeft, 1.f); |
|
coeffA.emplace_back(idxCenter, idxCenter,-4.f); |
|
coeffA.emplace_back(idxCenter, idxRight, 1.f); |
|
coeffA.emplace_back(idxCenter, idxDown, 1.f); |
|
// set target coefficient |
|
coeffB[idxCenter] = (bias == 1.f ? |
|
ColorLaplacian(src,i) : |
|
ColorLaplacian(src,i)*bias + ColorLaplacian(dst,i)*(1.f-bias)); |
|
} break; |
|
} |
|
} |
|
|
|
SparseMat A(nnz, nnz); |
|
A.setFromTriplets(coeffA.Begin(), coeffA.End()); |
|
coeffA.Release(); |
|
|
|
#ifdef TEXOPT_SOLVER_SPARSELU |
|
// use SparseLU factorization |
|
// (faster, but not working if EIGEN_DEFAULT_TO_ROW_MAJOR is defined, bug inside Eigen) |
|
const Eigen::SparseLU< SparseMat, Eigen::COLAMDOrdering<MatIdx> > solver(A); |
|
#else |
|
// use BiCGSTAB solver |
|
const Eigen::BiCGSTAB< SparseMat, Eigen::IncompleteLUT<float> > solver(A); |
|
#endif |
|
ASSERT(solver.info() == Eigen::Success); |
|
for (int channel=0; channel<3; ++channel) { |
|
const Eigen::Map< Eigen::VectorXf, Eigen::Unaligned, Eigen::Stride<0,3> > b(coeffB.front().ptr()+channel, nnz); |
|
const Eigen::VectorXf x(solver.solve(b)); |
|
ASSERT(solver.info() == Eigen::Success); |
|
for (int i = 0; i < n; ++i) { |
|
const MatIdx index(indices(i)); |
|
if (index != -1) |
|
dst(i)[channel] = x[index]; |
|
} |
|
} |
|
} |
|
|
|
// Native |
|
void MeshTexture::LocalSeamLeveling3() |
|
{ |
|
ASSERT(!seamVertices.empty()); |
|
const unsigned numPatches(texturePatches.size()-1); |
|
|
|
// adjust texture patches locally, so that the border continues smoothly inside the patch |
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp parallel for schedule(dynamic) |
|
for (int i=0; i<(int)numPatches; ++i) { |
|
#else |
|
for (unsigned i=0; i<numPatches; ++i) { |
|
#endif |
|
const uint32_t idxPatch((uint32_t)i); |
|
const TexturePatch& texturePatch = texturePatches[idxPatch]; |
|
// extract image |
|
const Image8U3& image0(images[texturePatch.label].image); |
|
Image32F3 image, imageOrg; |
|
#ifdef USE_CUDA |
|
if (MeshTextureCUDA::ConvertToCUDA(image0(texturePatch.rect), image, 1.0/255.0)) |
|
{ |
|
// printf("ConvertToCUDA Successed!\n"); |
|
} |
|
else |
|
{ |
|
image0(texturePatch.rect).convertTo(image, CV_32FC3, 1.0/255.0); |
|
// printf("ConvertToCUDA Failed!\n"); |
|
} |
|
#else |
|
image0(texturePatch.rect).convertTo(image, CV_32FC3, 1.0/255.0); |
|
#endif |
|
|
|
image.copyTo(imageOrg); |
|
// render patch coverage |
|
Image8U mask(image.size()); { |
|
mask.memset(0); |
|
struct RasterMesh { |
|
Image8U& image; |
|
inline void operator()(const ImageRef& pt) { |
|
ASSERT(image.isInside(pt)); |
|
image(pt) = interior; |
|
} |
|
} data{mask}; |
|
for (const FIndex idxFace: texturePatch.faces) { |
|
const TexCoord* tri = faceTexcoords.data()+idxFace*3; |
|
ColorMap::RasterizeTriangle(tri[0], tri[1], tri[2], data); |
|
} |
|
} |
|
// render the patch border meeting neighbor patches |
|
const Sampler sampler; |
|
const TexCoord offset(texturePatch.rect.tl()); |
|
for (const SeamVertex& seamVertex0: seamVertices) { |
|
if (seamVertex0.patches.size() < 2) |
|
continue; |
|
const uint32_t idxVertPatch0(seamVertex0.patches.Find(idxPatch)); |
|
if (idxVertPatch0 == SeamVertex::Patches::NO_INDEX) |
|
continue; |
|
const SeamVertex::Patch& patch0 = seamVertex0.patches[idxVertPatch0]; |
|
const TexCoord p0(patch0.proj-offset); |
|
// for each edge of this vertex belonging to this patch... |
|
for (const SeamVertex::Patch::Edge& edge0: patch0.edges) { |
|
// select the same edge leaving from the adjacent vertex |
|
const SeamVertex& seamVertex1 = seamVertices[edge0.idxSeamVertex]; |
|
const uint32_t idxVertPatch0Adj(seamVertex1.patches.Find(idxPatch)); |
|
ASSERT(idxVertPatch0Adj != SeamVertex::Patches::NO_INDEX); |
|
const SeamVertex::Patch& patch0Adj = seamVertex1.patches[idxVertPatch0Adj]; |
|
const TexCoord p0Adj(patch0Adj.proj-offset); |
|
// find the other patch sharing the same edge (edge with same adjacent vertex) |
|
FOREACH(idxVertPatch1, seamVertex0.patches) { |
|
if (idxVertPatch1 == idxVertPatch0) |
|
continue; |
|
const SeamVertex::Patch& patch1 = seamVertex0.patches[idxVertPatch1]; |
|
const uint32_t idxEdge1(patch1.edges.Find(edge0.idxSeamVertex)); |
|
if (idxEdge1 == SeamVertex::Patch::Edges::NO_INDEX) |
|
continue; |
|
const TexCoord& p1(patch1.proj); |
|
// select the same edge belonging to the second patch leaving from the adjacent vertex |
|
const uint32_t idxVertPatch1Adj(seamVertex1.patches.Find(patch1.idxPatch)); |
|
ASSERT(idxVertPatch1Adj != SeamVertex::Patches::NO_INDEX); |
|
const SeamVertex::Patch& patch1Adj = seamVertex1.patches[idxVertPatch1Adj]; |
|
const TexCoord& p1Adj(patch1Adj.proj); |
|
// this is an edge separating two (valid) patches; |
|
// draw it on this patch as the mean color of the two patches |
|
const Image8U3& image1(images[texturePatches[patch1.idxPatch].label].image); |
|
struct RasterPatch { |
|
Image32F3& image; |
|
Image8U& mask; |
|
const Image32F3& image0; |
|
const Image8U3& image1; |
|
const TexCoord p0, p0Dir; |
|
const TexCoord p1, p1Dir; |
|
const float length; |
|
const Sampler sampler; |
|
inline RasterPatch(Image32F3& _image, Image8U& _mask, const Image32F3& _image0, const Image8U3& _image1, |
|
const TexCoord& _p0, const TexCoord& _p0Adj, const TexCoord& _p1, const TexCoord& _p1Adj) |
|
: image(_image), mask(_mask), image0(_image0), image1(_image1), |
|
p0(_p0), p0Dir(_p0Adj-_p0), p1(_p1), p1Dir(_p1Adj-_p1), length((float)norm(p0Dir)), sampler() {} |
|
inline void operator()(const ImageRef& pt) { |
|
const float l((float)norm(TexCoord(pt)-p0)/length); |
|
// compute mean color |
|
const TexCoord samplePos0(p0 + p0Dir * l); |
|
const Color color0(image0.sample<Sampler,Color>(sampler, samplePos0)); |
|
const TexCoord samplePos1(p1 + p1Dir * l); |
|
const Color color1(image1.sample<Sampler,Color>(sampler, samplePos1)/255.f); |
|
image(pt) = Color((color0 + color1) * 0.5f); |
|
// set mask edge also |
|
mask(pt) = border; |
|
} |
|
} data(image, mask, imageOrg, image1, p0, p0Adj, p1, p1Adj); |
|
Image32F3::DrawLine(p0, p0Adj, data); |
|
// skip remaining patches, |
|
// as a manifold edge is shared by at most two faces (one in each patch), which we have already found |
|
break; |
|
} |
|
} |
|
// render the vertex at the patch border meeting neighbor patches |
|
AccumColor accumColor; |
|
// for each patch... |
|
for (const SeamVertex::Patch& patch: seamVertex0.patches) { |
|
// add its view to the vertex mean color |
|
const Image8U3& img(images[texturePatches[patch.idxPatch].label].image); |
|
accumColor.Add(img.sample<Sampler,Color>(sampler, patch.proj)/255.f, 1.f); |
|
} |
|
const ImageRef pt(ROUND2INT(patch0.proj-offset)); |
|
image(pt) = accumColor.Normalized(); |
|
mask(pt) = border; |
|
} |
|
// make sure the border is continuous and |
|
// keep only the exterior stripe of the given size |
|
#ifdef USE_CUDA |
|
if (MeshTextureCUDA::ProcessMaskCUDA(mask, 20)) |
|
{ |
|
// printf("Success ProcessMaskCUDA!\n"); |
|
// CUDA acceleration succeeded |
|
} |
|
else |
|
{ |
|
// fall back to the CPU implementation |
|
// printf("Failed ProcessMaskCUDA!\n"); |
|
ProcessMask(mask, 20); |
|
} |
|
#else |
|
ProcessMask(mask, 20); |
|
#endif |
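|
// At this point `mask` marks rasterized face pixels as `interior` and the drawn seam pixels as |
|
// `border`; ProcessMask (or its CUDA counterpart) is expected to keep only an exterior stripe |
|
// about 20 pixels wide along that border, so the Poisson blend below only touches a thin band |
|
// next to the patch seams instead of the whole patch. |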
|
|
|
// compute texture patch blending |
|
#ifdef USE_CUDA |
|
if (MeshTextureCUDA::PoissonBlendCUDA(image, imageOrg, mask, 1.0f)) |
|
{ |
|
// printf("Success PoissonBlendCUDA!"); |
|
// CUDA acceleration succeeded |
|
} |
|
else |
|
{ |
|
// fall back to the CPU implementation |
|
// printf("Failed PoissonBlendCUDA!"); |
|
PoissonBlending(imageOrg, image, mask, 1.0f); |
|
} |
|
#else |
|
PoissonBlending(imageOrg, image, mask); |
|
#endif |
|
// apply color correction to the patch image |
|
cv::Mat imagePatch(image0(texturePatch.rect)); |
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp parallel for collapse(2) |
|
#endif |
|
for (int r=0; r<image.rows; ++r) { |
|
for (int c=0; c<image.cols; ++c) { |
|
if (mask(r,c) == empty) |
|
continue; |
|
const Color& a = image(r,c); |
|
Pixel8U& v = imagePatch.at<Pixel8U>(r,c); |
|
for (int p=0; p<3; ++p) |
|
v[p] = (uint8_t)CLAMP(ROUND2INT(a[p]*255.f), 0, 255); |
|
} |
|
} |
|
} |
|
} |
|
|
|
void MeshTexture::GenerateTexture(bool bGlobalSeamLeveling, bool bLocalSeamLeveling, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, float fSharpnessWeight, int maxTextureSize, const SEACAVE::String& basename, bool bOriginFaceview) |
|
{ |
|
// Bruce |
|
// bGlobalSeamLeveling = false; |
|
// bLocalSeamLeveling = false; |
|
// project patches in the corresponding view and compute texture-coordinates and bounding-box |
|
// Bruce |
|
int border = 2; |
|
if (!bOriginFaceview) |
|
border = 4; |
|
faceTexcoords.resize(faces.size()*3); |
|
faceTexindices.resize(faces.size()); |
|
#ifdef TEXOPT_USE_OPENMP |
|
// LOG_OUT() << "def TEXOPT_USE_OPENMP" << std::endl; |
|
const unsigned numPatches(texturePatches.size()-1); |
|
#pragma omp parallel for schedule(dynamic) |
|
for (int_t idx=0; idx<(int_t)numPatches; ++idx) { |
|
TexturePatch& texturePatch = texturePatches[(uint32_t)idx]; |
|
#else |
|
for (TexturePatch *pTexturePatch=texturePatches.Begin(), *pTexturePatchEnd=texturePatches.End()-1; pTexturePatch<pTexturePatchEnd; ++pTexturePatch) { |
|
TexturePatch& texturePatch = *pTexturePatch; |
|
#endif |
|
const Image& imageData = images[texturePatch.label]; |
|
AABB2f aabb(true); |
|
for (const FIndex idxFace: texturePatch.faces) { |
|
const Face& face = faces[idxFace]; |
|
TexCoord* texcoords = faceTexcoords.data()+idxFace*3; |
|
for (int i=0; i<3; ++i) { |
|
texcoords[i] = imageData.camera.ProjectPointP(vertices[face[i]]); |
|
ASSERT(imageData.image.isInsideWithBorder(texcoords[i], border)); |
|
aabb.InsertFull(texcoords[i]); |
|
} |
|
} |
|
// compute relative texture coordinates |
|
ASSERT(imageData.image.isInside(Point2f(aabb.ptMin))); |
|
ASSERT(imageData.image.isInside(Point2f(aabb.ptMax))); |
|
|
|
if (bOriginFaceview) |
|
{ |
|
texturePatch.rect.x = FLOOR2INT(aabb.ptMin[0])-border; |
|
texturePatch.rect.y = FLOOR2INT(aabb.ptMin[1])-border; |
|
texturePatch.rect.width = CEIL2INT(aabb.ptMax[0]-aabb.ptMin[0])+border*2; |
|
texturePatch.rect.height = CEIL2INT(aabb.ptMax[1]-aabb.ptMin[1])+border*2; |
|
} |
|
else |
|
{ |
|
texturePatch.rect.x = std::max(0, FLOOR2INT(aabb.ptMin[0])-border); |
|
texturePatch.rect.y = std::max(0, FLOOR2INT(aabb.ptMin[1])-border); |
|
// clamp the size so it does not exceed the actual image bounds |
|
const cv::Mat& img = images[texturePatch.label].image; |
|
texturePatch.rect.width = std::min( |
|
CEIL2INT(aabb.ptMax[0]-aabb.ptMin[0])+border*2, |
|
img.cols - texturePatch.rect.x |
|
); |
|
texturePatch.rect.height = std::min( |
|
CEIL2INT(aabb.ptMax[1]-aabb.ptMin[1])+border*2, |
|
img.rows - texturePatch.rect.y |
|
); |
|
} |
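|
// When bOriginFaceview is false the rectangle is clamped to the image bounds (matching the |
|
// larger 4-pixel border chosen above), whereas the original branch relies on all projections |
|
// staying inside the image; the commented block below appears to be an earlier, more defensive |
|
// variant of the same clamping. |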
|
|
|
/* |
|
// add bounds checks after setting texturePatch.rect |
|
texturePatch.rect.width = std::max(1, texturePatch.rect.width); // width must be at least 1 |
|
texturePatch.rect.height = std::max(1, texturePatch.rect.height); // height must be at least 1 |
|
|
|
// check whether the ROI exceeds the actual image bounds |
|
if (texturePatch.rect.x < 0) { |
|
texturePatch.rect.width += texturePatch.rect.x; // correct the negative origin |
|
texturePatch.rect.x = 0; |
|
} |
|
if (texturePatch.rect.y < 0) { |
|
texturePatch.rect.height += texturePatch.rect.y; |
|
texturePatch.rect.y = 0; |
|
} |
|
// make sure the ROI does not cross the right/bottom image border |
|
texturePatch.rect.width = std::min(texturePatch.rect.width, imageData.image.cols - texturePatch.rect.x); |
|
texturePatch.rect.height = std::min(texturePatch.rect.height, imageData.image.rows - texturePatch.rect.y); |
|
//*/ |
|
|
|
ASSERT(imageData.image.isInside(texturePatch.rect.tl())); |
|
ASSERT(imageData.image.isInside(texturePatch.rect.br())); |
|
const TexCoord offset(texturePatch.rect.tl()); |
|
for (const FIndex idxFace: texturePatch.faces) { |
|
TexCoord* texcoords = faceTexcoords.data()+idxFace*3; |
|
for (int v=0; v<3; ++v) |
|
texcoords[v] -= offset; |
|
} |
|
} |
|
{ |
|
// init last patch to point to a small uniform color patch |
|
TexturePatch& texturePatch = texturePatches.back(); |
|
const int sizePatch(border*2+1); |
|
texturePatch.rect = cv::Rect(0,0, sizePatch,sizePatch); |
|
for (const FIndex idxFace: texturePatch.faces) { |
|
TexCoord* texcoords = faceTexcoords.data()+idxFace*3; |
|
for (int i=0; i<3; ++i) |
|
texcoords[i] = TexCoord(0.5f, 0.5f); |
|
} |
|
} |
|
|
|
|
|
LOG_OUT() << "First loop completed" << std::endl; |
|
|
|
TD_TIMER_STARTD(); |
|
// perform seam leveling |
|
if (texturePatches.size() > 2 && (bGlobalSeamLeveling || bLocalSeamLeveling)) { |
|
// create seam vertices and edges |
|
CreateSeamVertices(); |
|
|
|
// perform global seam leveling |
|
if (bGlobalSeamLeveling) { |
|
TD_TIMER_STARTD(); |
|
GlobalSeamLeveling3(); |
|
DEBUG_EXTRA("\tglobal seam leveling completed (%s)", TD_TIMER_GET_FMT().c_str()); |
|
} |
|
|
|
// perform local seam leveling |
|
if (bLocalSeamLeveling) { |
|
TD_TIMER_STARTD(); |
|
// LocalSeamLeveling(); |
|
LocalSeamLeveling3(); |
|
DEBUG_EXTRA("\tlocal seam leveling completed (%s)", TD_TIMER_GET_FMT().c_str()); |
|
} |
|
} |
|
DEBUG_EXTRA("seam (%s)", TD_TIMER_GET_FMT().c_str()); |
|
|
|
// merge texture patches with overlapping rectangles |
|
for (unsigned i=0; i<texturePatches.size()-1; ++i) { |
|
TexturePatch& texturePatchBig = texturePatches[i]; |
|
for (unsigned j=1; j<texturePatches.size(); ++j) { |
|
if (i == j) |
|
continue; |
|
TexturePatch& texturePatchSmall = texturePatches[j]; |
|
if (texturePatchBig.label != texturePatchSmall.label) |
|
continue; |
|
if (!RectsBinPack::IsContainedIn(texturePatchSmall.rect, texturePatchBig.rect)) |
|
continue; |
|
// translate texture coordinates |
|
const TexCoord offset(texturePatchSmall.rect.tl()-texturePatchBig.rect.tl()); |
|
for (const FIndex idxFace: texturePatchSmall.faces) { |
|
TexCoord* texcoords = faceTexcoords.data()+idxFace*3; |
|
for (int v=0; v<3; ++v) |
|
texcoords[v] += offset; |
|
} |
|
// join faces lists |
|
texturePatchBig.faces.JoinRemove(texturePatchSmall.faces); |
|
// remove the small patch |
|
texturePatches.RemoveAtMove(j--); |
|
} |
|
} |
|
|
|
LOG_OUT() << "Second loop completed" << std::endl; |
|
// create texture |
|
{ |
|
// arrange texture patches to fit the smallest possible texture image |
|
// const unsigned minPatchSize = 20; |
|
RectsBinPack::RectWIdxArr unplacedRects(texturePatches.size()); |
|
FOREACH(i, texturePatches) { |
|
if (texturePatches[i].label == NO_ID) { |
|
// fill the invalid patch region with green |
|
// texturesDiffuse[i](texturePatches[i].rect).setTo(cv::Scalar(0, 255, 0)); // BGR format: green |
|
// continue; |
|
} |
|
|
|
// LOG_OUT() << "Third loop completed" << std::endl; |
|
if (maxTextureSize > 0 && (texturePatches[i].rect.width > maxTextureSize || texturePatches[i].rect.height > maxTextureSize)) { |
|
DEBUG("error: a patch of size %u x %u does not fit the texture", texturePatches[i].rect.width, texturePatches[i].rect.height); |
|
ABORT("the maximum texture size chosen cannot fit a patch"); |
|
} |
|
unplacedRects[i] = {texturePatches[i].rect, i}; |
|
} |
|
LOG_OUT() << "unplacedRects loop completed" << std::endl; |
|
|
|
LOG_OUT() << "pack patches: one pack per texture file loop completed" << std::endl; |
|
// pack patches: one pack per texture file |
|
CLISTDEF2IDX(RectsBinPack::RectWIdxArr, TexIndex) placedRects; { |
|
// increase texture size till all patches fit |
|
// Bruce |
|
unsigned typeRectsBinPack(nRectPackingHeuristic/100); |
|
unsigned typeSplit((nRectPackingHeuristic-typeRectsBinPack*100)/10); |
|
unsigned typeHeuristic(nRectPackingHeuristic%10); |
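|
// nRectPackingHeuristic is decoded digit-wise: the hundreds digit selects the packer |
|
// (0 MaxRects, 1 Skyline, 2 Guillotine, matching the switch below), the tens digit the split |
|
// flag/heuristic and the units digit the placement heuristic; e.g. a purely illustrative value |
|
// of 215 would mean GuillotineBinPack with split heuristic 1 and placement heuristic 5. |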
|
if (!bOriginFaceview && false) |
|
{ |
|
typeRectsBinPack = 1; |
|
typeSplit = 0; |
|
typeHeuristic = 1; |
|
} |
|
int textureSize = 0; |
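|
// Packing strategy: start from the size estimated by ComputeTextureSize, try to place all |
|
// remaining rectangles, and if some do not fit, double the texture size (capped at |
|
// maxTextureSize) and retry; once the maximum size is reached, the patches placed so far are |
|
// flushed into a new texture page and the loop continues with whatever is still unplaced. |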
|
while (!unplacedRects.empty()) { |
|
TD_TIMER_STARTD(); |
|
if (textureSize == 0) { |
|
textureSize = RectsBinPack::ComputeTextureSize(unplacedRects, nTextureSizeMultiple); |
|
if (maxTextureSize > 0 && textureSize > maxTextureSize) |
|
textureSize = maxTextureSize; |
|
} |
|
|
|
RectsBinPack::RectWIdxArr newPlacedRects; |
|
switch (typeRectsBinPack) { |
|
case 0: { |
|
MaxRectsBinPack pack(textureSize, textureSize); |
|
newPlacedRects = pack.Insert(unplacedRects, (MaxRectsBinPack::FreeRectChoiceHeuristic)typeHeuristic); |
|
break; } |
|
case 1: { |
|
SkylineBinPack pack(textureSize, textureSize, typeSplit!=0); |
|
newPlacedRects = pack.Insert(unplacedRects, (SkylineBinPack::LevelChoiceHeuristic)typeHeuristic); |
|
break; } |
|
case 2: { |
|
GuillotineBinPack pack(textureSize, textureSize); |
|
newPlacedRects = pack.Insert(unplacedRects, false, (GuillotineBinPack::FreeRectChoiceHeuristic)typeHeuristic, (GuillotineBinPack::GuillotineSplitHeuristic)typeSplit); |
|
break; } |
|
default: |
|
ABORT("error: unknown RectsBinPack type"); |
|
} |
|
DEBUG_ULTIMATE("\tpacking texture completed: %u initial patches, %u placed patches, %u texture-size, %u textures (%s)", texturePatches.size(), newPlacedRects.size(), textureSize, placedRects.size(), TD_TIMER_GET_FMT().c_str()); |
|
|
|
if (textureSize == maxTextureSize || unplacedRects.empty()) { |
|
// create texture image |
|
placedRects.emplace_back(std::move(newPlacedRects)); |
|
// Pixel8U colEmpty2=Pixel8U(0,0,255); |
|
texturesDiffuse.emplace_back(textureSize, textureSize).setTo(cv::Scalar(colEmpty.b, colEmpty.g, colEmpty.r)); |
|
textureSize = 0; |
|
} else { |
|
// try again with a bigger texture |
|
textureSize *= 2; |
|
if (maxTextureSize > 0) |
|
textureSize = std::min(textureSize, maxTextureSize); // cap the enlarged size at the maximum allowed texture size |
|
unplacedRects.JoinRemove(newPlacedRects); |
|
} |
|
} |
|
} |
|
LOG_OUT() << "Third loop completed" << std::endl; |
|
Mesh::FaceIdxArr emptyFaceIndexes; |
|
|
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp parallel for schedule(dynamic) |
|
for (int_t i=0; i<(int_t)placedRects.size(); ++i) { |
|
for (int_t j=0; j<(int_t)placedRects[(TexIndex)i].size(); ++j) { |
|
const TexIndex idxTexture((TexIndex)i); |
|
const uint32_t idxPlacedPatch((uint32_t)j); |
|
#else |
|
FOREACH(idxTexture, placedRects) { |
|
FOREACH(idxPlacedPatch, placedRects[idxTexture]) { |
|
#endif |
|
const TexturePatch& texturePatch = texturePatches[placedRects[idxTexture][idxPlacedPatch].patchIdx]; |
|
const RectsBinPack::Rect& rect = placedRects[idxTexture][idxPlacedPatch].rect; |
|
// copy patch image |
|
ASSERT((rect.width == texturePatch.rect.width && rect.height == texturePatch.rect.height) || |
|
(rect.height == texturePatch.rect.width && rect.width == texturePatch.rect.height)); |
|
int x(0), y(1); |
|
if (texturePatch.label != NO_ID) { |
|
const Image& imageData = images[texturePatch.label]; |
|
cv::Mat patch(imageData.image(texturePatch.rect)); |
|
if (rect.width != texturePatch.rect.width) { |
|
// flip patch and texture-coordinates |
|
patch = patch.t(); |
|
x = 1; y = 0; |
|
} |
|
|
|
patch.copyTo(texturesDiffuse[idxTexture](rect)); |
|
} |
|
else |
|
{ |
|
//* |
|
auto it = texturePatch.faces.begin(); |
|
while (it != texturePatch.faces.end()) |
|
{ |
|
emptyFaceIndexes.push_back(*it); |
|
++it; |
|
} |
|
//*/ |
|
/* |
|
// handle invalid patches: use the alternative texture |
|
if (alternativeTexture != nullptr) { |
|
// sample from the alternative texture |
|
cv::Mat patch(rect.size(), CV_8UC3); |
|
for (int r = 0; r < patch.rows; ++r) { |
|
for (int c = 0; c < patch.cols; ++c) { |
|
// compute UV coordinates: map the pixel position into the alternative texture's UV space |
|
float u = (float)c / patch.cols; |
|
float v = (float)r / patch.rows; |
|
// sample the alternative texture |
|
int xSrc = static_cast<int>(u * alternativeTexture->width()); |
|
int ySrc = static_cast<int>(v * alternativeTexture->height()); |
|
xSrc = std::min(std::max(xSrc, 0), alternativeTexture->width() - 1); |
|
ySrc = std::min(std::max(ySrc, 0), alternativeTexture->height() - 1); |
|
Pixel8U color = (*alternativeTexture)(ySrc, xSrc); |
|
patch.at<Pixel8U>(r, c) = color; |
|
} |
|
} |
|
// Pixel8U colEmpty2=Pixel8U(0,0,255); |
|
// cv::Mat patch2(rect.size(), CV_8UC3, cv::Scalar(colEmpty2.b, colEmpty2.g, colEmpty2.r)); |
|
// patch2.copyTo(texturesDiffuse[idxTexture](rect)); |
|
patch.copyTo(texturesDiffuse[idxTexture](rect)); |
|
} else { |
|
// no alternative texture available, use the default color |
|
// Pixel8U colEmpty2=Pixel8U(0,0,255); |
|
cv::Mat patch(rect.size(), CV_8UC3, cv::Scalar(colEmpty.b, colEmpty.g, colEmpty.r)); |
|
patch.copyTo(texturesDiffuse[idxTexture](rect)); |
|
} |
|
*/ |
|
} |
|
// compute final texture coordinates |
|
const TexCoord offset(rect.tl()); |
|
for (const FIndex idxFace: texturePatch.faces) { |
|
TexCoord* texcoords = faceTexcoords.data()+idxFace*3; |
|
faceTexindices[idxFace] = idxTexture; |
|
for (int v=0; v<3; ++v) { |
|
TexCoord& texcoord = texcoords[v]; |
|
texcoord = TexCoord( |
|
texcoord[x]+offset.x, |
|
texcoord[y]+offset.y |
|
); |
|
} |
|
} |
|
} |
|
} |
|
if (texturesDiffuse.size() == 1) |
|
faceTexindices.Release(); |
|
|
|
/* |
|
// Lab color-space processing (addresses over-/under-exposed regions) |
|
if (bGlobalSeamLeveling) { |
|
for (auto& texture : texturesDiffuse) { |
|
// safety check: skip empty images and non-3-channel (non-BGR) textures |
|
if (texture.empty() || texture.channels() != 3) { |
|
LOG_OUT() << "Skipping invalid texture: empty=" << texture.empty() |
|
<< ", channels=" << texture.channels() << std::endl; |
|
continue; |
|
} |
|
|
|
// create a temporary Lab image |
|
cv::Mat labImage; |
|
cv::cvtColor(texture, labImage, cv::COLOR_BGR2Lab); |
|
|
|
// split the channels |
|
std::vector<cv::Mat> labChannels; |
|
cv::split(labImage, labChannels); |
|
|
|
// use CLAHE for contrast-limited enhancement |
|
cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(); |
|
clahe->setClipLimit(2.0); |
|
clahe->apply(labChannels[0], labChannels[0]); |
|
|
|
// clamp the Lab channel value ranges (prevent overflow) |
|
// use a plain safe loop instead of forEach to avoid memory issues |
|
for (int r = 0; r < labImage.rows; ++r) { |
|
for (int c = 0; c < labImage.cols; ++c) { |
|
cv::Vec3b& pixel = labImage.at<cv::Vec3b>(r, c); // 注意:这里使用Vec3b而非Vec3f |
|
pixel[0] = cv::saturate_cast<uchar>(pixel[0]); // L ∈ [0,255] |
|
pixel[1] = cv::saturate_cast<uchar>(pixel[1]); // a ∈ [0,255] |
|
pixel[2] = cv::saturate_cast<uchar>(pixel[2]); // b ∈ [0,255] |
|
} |
|
} |
|
|
|
// merge the channels and convert back to BGR |
|
cv::merge(labChannels, labImage); |
|
cv::cvtColor(labImage, texture, cv::COLOR_Lab2BGR); |
|
} |
|
} |
|
*/ |
|
|
|
// apply some sharpening |
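|
// This is a standard unsharp mask: out = (1 + w) * I - w * GaussianBlur(I, sigma), with |
|
// w = fSharpnessWeight and sigma = 1.5; w = 0 leaves the texture unchanged and larger values |
|
// boost high-frequency detail. |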
|
if (fSharpnessWeight > 0) { |
|
constexpr double sigma = 1.5; |
|
for (auto &textureDiffuse: texturesDiffuse) { |
|
Image8U3 blurryTextureDiffuse; |
|
cv::GaussianBlur(textureDiffuse, blurryTextureDiffuse, cv::Size(), sigma); |
|
cv::addWeighted(textureDiffuse, 1+fSharpnessWeight, blurryTextureDiffuse, -fSharpnessWeight, 0, textureDiffuse); |
|
} |
|
} |
|
LOG_OUT() << "Fourth loop completed" << std::endl; |
|
|
|
std::ofstream out(basename + "_empty_color_triangles.txt"); |
|
RFOREACHPTR(pIdxF, emptyFaceIndexes) { |
|
out << *pIdxF << "\n"; |
|
} |
|
out.close(); |
|
} |
|
} |
|
|
|
// New |
|
void MeshTexture::GlobalSeamLeveling() |
|
{ |
|
ASSERT(!seamVertices.empty()); |
|
const unsigned numPatches(texturePatches.size()-1); |
|
// find the patch ID for each vertex |
|
PatchIndices patchIndices(vertices.size()); |
|
patchIndices.Memset(0); |
|
FOREACH(f, faces) { |
|
const uint32_t idxPatch(mapIdxPatch[components[f]]); |
|
const Face& face = faces[f]; |
|
for (int v=0; v<3; ++v) |
|
patchIndices[face[v]].idxPatch = idxPatch; |
|
} |
|
FOREACH(i, seamVertices) { |
|
const SeamVertex& seamVertex = seamVertices[i]; |
|
ASSERT(!seamVertex.patches.empty()); |
|
PatchIndex& patchIndex = patchIndices[seamVertex.idxVertex]; |
|
patchIndex.bIndex = true; |
|
patchIndex.idxSeamVertex = i; |
|
} |
|
|
|
// assign a row index within the solution vector x to each vertex/patch |
|
ASSERT(vertices.size() < static_cast<VIndex>(std::numeric_limits<MatIdx>::max())); |
|
MatIdx rowsX(0); |
|
typedef std::unordered_map<uint32_t,MatIdx> VertexPatch2RowMap; |
|
cList<VertexPatch2RowMap> vertpatch2rows(vertices.size()); |
|
FOREACH(i, vertices) { |
|
const PatchIndex& patchIndex = patchIndices[i]; |
|
VertexPatch2RowMap& vertpatch2row = vertpatch2rows[i]; |
|
if (patchIndex.bIndex) { |
|
// vertex is part of multiple patches |
|
const SeamVertex& seamVertex = seamVertices[patchIndex.idxSeamVertex]; |
|
ASSERT(seamVertex.idxVertex == i); |
|
for (const SeamVertex::Patch& patch: seamVertex.patches) { |
|
ASSERT(patch.idxPatch != numPatches); |
|
vertpatch2row[patch.idxPatch] = rowsX++; |
|
} |
|
} else |
|
if (patchIndex.idxPatch < numPatches) { |
|
// vertex is part of only one patch |
|
vertpatch2row[patchIndex.idxPatch] = rowsX++; |
|
} |
|
} |
|
|
|
// fill Tikhonov's Gamma matrix (regularization constraints) |
|
// Bruce |
|
// const float lambda(0.1f); |
|
const float lambda(0.8f); |
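|
// lambda weights the Tikhonov smoothness term: each Gamma row built below encodes |
|
// lambda * (g_v - g_vAdj) for an adjacent vertex pair within the same patch, so larger values |
|
// favor smoother per-patch color adjustments at the expense of matching the measured seam |
|
// color differences exactly (0.1 is the previous, weaker setting kept above for reference). |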
|
|
|
MatIdx rowsGamma(0); |
|
Mesh::VertexIdxArr adjVerts; |
|
CLISTDEF0(MatEntry) rows(0, vertices.size()*4); |
|
FOREACH(v, vertices) { |
|
adjVerts.Empty(); |
|
scene.mesh.GetAdjVertices(v, adjVerts); |
|
VertexPatchIterator itV(patchIndices[v], seamVertices); |
|
while (itV.Next()) { |
|
const uint32_t idxPatch(itV); |
|
if (idxPatch == numPatches) |
|
continue; |
|
const MatIdx col(vertpatch2rows[v].at(idxPatch)); |
|
for (const VIndex vAdj: adjVerts) { |
|
if (v >= vAdj) |
|
continue; |
|
VertexPatchIterator itVAdj(patchIndices[vAdj], seamVertices); |
|
while (itVAdj.Next()) { |
|
const uint32_t idxPatchAdj(itVAdj); |
|
if (idxPatch == idxPatchAdj) { |
|
const MatIdx colAdj(vertpatch2rows[vAdj].at(idxPatchAdj)); |
|
rows.emplace_back(rowsGamma, col, lambda); |
|
rows.emplace_back(rowsGamma, colAdj, -lambda); |
|
++rowsGamma; |
|
} |
|
} |
|
} |
|
} |
|
} |
|
ASSERT(rows.size()/2 < static_cast<IDX>(std::numeric_limits<MatIdx>::max())); |
|
|
|
SparseMat Gamma(rowsGamma, rowsX); |
|
Gamma.setFromTriplets(rows.Begin(), rows.End()); |
|
rows.Empty(); |
|
|
|
// fill the matrix A and the coefficients for the Vector b of the linear equation system |
|
IndexArr indices; |
|
Colors vertexColors; |
|
Colors coeffB; |
|
for (const SeamVertex& seamVertex: seamVertices) { |
|
if (seamVertex.patches.size() < 2) |
|
continue; |
|
seamVertex.SortByPatchIndex(indices); |
|
vertexColors.resize(indices.size()); |
|
FOREACH(i, indices) { |
|
const SeamVertex::Patch& patch0 = seamVertex.patches[indices[i]]; |
|
ASSERT(patch0.idxPatch < numPatches); |
|
SampleImage sampler(images[texturePatches[patch0.idxPatch].label].image); |
|
for (const SeamVertex::Patch::Edge& edge: patch0.edges) { |
|
const SeamVertex& seamVertex1 = seamVertices[edge.idxSeamVertex]; |
|
const SeamVertex::Patches::IDX idxPatch1(seamVertex1.patches.Find(patch0.idxPatch)); |
|
ASSERT(idxPatch1 != SeamVertex::Patches::NO_INDEX); |
|
const SeamVertex::Patch& patch1 = seamVertex1.patches[idxPatch1]; |
|
sampler.AddEdge(patch0.proj, patch1.proj); |
|
} |
|
vertexColors[i] = sampler.GetColor(); |
|
} |
|
const VertexPatch2RowMap& vertpatch2row = vertpatch2rows[seamVertex.idxVertex]; |
|
for (IDX i=0; i<indices.size()-1; ++i) { |
|
const uint32_t idxPatch0(seamVertex.patches[indices[i]].idxPatch); |
|
const Color& color0 = vertexColors[i]; |
|
const MatIdx col0(vertpatch2row.at(idxPatch0)); |
|
for (IDX j=i+1; j<indices.size(); ++j) { |
|
const uint32_t idxPatch1(seamVertex.patches[indices[j]].idxPatch); |
|
const Color& color1 = vertexColors[j]; |
|
const MatIdx col1(vertpatch2row.at(idxPatch1)); |
|
ASSERT(idxPatch0 < idxPatch1); |
|
const MatIdx rowA((MatIdx)coeffB.size()); |
|
coeffB.Insert(color1 - color0); |
|
ASSERT(ISFINITE(coeffB.back())); |
|
rows.emplace_back(rowA, col0, 1.f); |
|
rows.emplace_back(rowA, col1, -1.f); |
|
} |
|
} |
|
} |
|
ASSERT(coeffB.size() < static_cast<IDX>(std::numeric_limits<MatIdx>::max())); |
|
|
|
const MatIdx rowsA((MatIdx)coeffB.size()); |
|
SparseMat A(rowsA, rowsX); |
|
A.setFromTriplets(rows.Begin(), rows.End()); |
|
rows.Release(); |
|
|
|
SparseMat Lhs(A.transpose() * A + Gamma.transpose() * Gamma); |
|
// CG uses only the lower triangle, so prune the rest and compress matrix |
|
Lhs.prune([](const int& row, const int& col, const float&) -> bool { |
|
return col <= row; |
|
}); |
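|
// The least-squares problem min ||A g - b||^2 + ||Gamma g||^2 is solved through its normal |
|
// equations (A^T A + Gamma^T Gamma) g = A^T b; only the lower triangle of the symmetric |
|
// left-hand side is kept, which is all that ConjugateGradient<..., Eigen::Lower> reads below. |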
|
|
|
// globally solve for the correction colors |
|
Eigen::Matrix<float,Eigen::Dynamic,3,Eigen::RowMajor> colorAdjustments(rowsX, 3); |
|
{ |
|
// init CG solver |
|
Eigen::ConjugateGradient<SparseMat, Eigen::Lower> solver; |
|
solver.setMaxIterations(1000); |
|
solver.setTolerance(0.0001f); |
|
solver.compute(Lhs); |
|
ASSERT(solver.info() == Eigen::Success); |
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp parallel for |
|
#endif |
|
for (int channel=0; channel<3; ++channel) { |
|
// init right hand side vector |
|
const Eigen::Map< Eigen::VectorXf, Eigen::Unaligned, Eigen::Stride<0,3> > b(coeffB.front().ptr()+channel, rowsA); |
|
// Bruce |
|
const Eigen::VectorXf Rhs(SparseMat(A.transpose()) * b); |
|
// Eigen::VectorXf Rhs = SparseMat(A.transpose()) * b_map[channel]; |
|
// colorAdjustments.col(channel) = solver.solve(Rhs).array() - solver.solve(Rhs).mean(); |
|
|
|
// solve for x |
|
const Eigen::VectorXf x(solver.solve(Rhs)); |
|
ASSERT(solver.info() == Eigen::Success); |
|
// subtract mean since the system is under-constrained and |
|
// we need the solution with minimal adjustments |
|
Eigen::Map< Eigen::VectorXf, Eigen::Unaligned, Eigen::Stride<0,3> >(colorAdjustments.data()+channel, rowsX) = x.array() - x.mean(); |
|
DEBUG_LEVEL(3, "\tcolor channel %d: %d iterations, %g residual", channel, solver.iterations(), solver.error()); |
|
} |
|
} |
|
|
|
// adjust texture patches using the correction colors |
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp parallel for schedule(dynamic) |
|
for (int i=0; i<(int)numPatches; ++i) { |
|
#else |
|
for (unsigned i=0; i<numPatches; ++i) { |
|
#endif |
|
const uint32_t idxPatch((uint32_t)i); |
|
TexturePatch& texturePatch = texturePatches[idxPatch]; |
|
ColorMap imageAdj(texturePatch.rect.size()); |
|
imageAdj.memset(0); |
|
cv::Mat srcImage = images[texturePatch.label].image(texturePatch.rect); |
|
|
|
// improvement 1: use adaptive-threshold edge detection |
|
cv::Mat grayImage; |
|
cv::cvtColor(srcImage, grayImage, cv::COLOR_BGR2GRAY); |
|
cv::Mat edgeMask; |
|
cv::adaptiveThreshold(grayImage, edgeMask, 255, |
|
cv::ADAPTIVE_THRESH_MEAN_C, cv::THRESH_BINARY, 11, 2); |
|
ASSERT(edgeMask.type() == CV_8UC1); |
|
|
|
// interpolate color adjustments over the whole patch |
|
struct RasterPatch { |
|
const TexCoord* tri; |
|
Color colors[3]; |
|
ColorMap& image; |
|
inline RasterPatch(ColorMap& _image) : image(_image) {} |
|
inline cv::Size Size() const { return image.size(); } |
|
inline void operator()(const ImageRef& pt, const Point3f& bary) { |
|
if (!image.isInside(pt)) |
|
return; |
|
const float weight = (bary.x > 0 && bary.y > 0 && bary.z > 0) ? 1.0f : 0.8f; |
|
image(pt) = (colors[0]*bary.x + colors[1]*bary.y + colors[2]*bary.z) * weight; |
|
} |
|
} data(imageAdj); |
|
|
|
for (const FIndex idxFace: texturePatch.faces) { |
|
const Face& face = faces[idxFace]; |
|
data.tri = faceTexcoords.Begin()+idxFace*3; |
|
for (int v=0; v<3; ++v){ |
|
if (auto search = vertpatch2rows[face[v]].find(idxPatch); search != vertpatch2rows[face[v]].end()) |
|
data.colors[v] = colorAdjustments.row(vertpatch2rows[face[v]].at(idxPatch)); |
|
} |
|
ColorMap::RasterizeTriangleBary(data.tri[0], data.tri[1], data.tri[2], data); |
|
} |
|
|
|
// dilate with one pixel width, in order to make sure patch border smooths out a little |
|
imageAdj.DilateMean<1>(imageAdj, Color::ZERO); |
|
|
|
// Bruce |
|
cv::Mat adjMat(imageAdj); |
|
// cv::GaussianBlur(adjMat, adjMat, cv::Size(3,3), 0.5); |
|
|
|
// upgrade the original 3x3 Gaussian kernel to 5x5 and increase the iteration count |
|
cv::GaussianBlur(adjMat, adjMat, cv::Size(5,5), 1.2); |
|
|
|
// new: edge-preserving filter (smooths color transitions while keeping sharp edges) |
|
cv::Mat filteredAdj; |
|
cv::edgePreservingFilter(adjMat, filteredAdj, cv::RECURS_FILTER, 60, 0.4); |
|
adjMat = filteredAdj; |
|
|
|
// change: use edge detection when applying the adjustment to avoid over-correction |
|
// cv::Mat edgeMask; |
|
cv::Canny(images[texturePatch.label].image(texturePatch.rect), edgeMask, 50, 150); |
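|
// The Canny result overwrites the adaptive-threshold mask computed earlier; pixels on strong |
|
// image edges get a reduced weight (0.3 instead of 1.0) in the loop below, which damps the |
|
// color adjustment there, presumably to avoid visible halos around sharp edges. |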
|
|
|
// apply color correction to the patch image |
|
cv::Mat image(images[texturePatch.label].image(texturePatch.rect)); |
|
//* |
|
for (int r=0; r<image.rows; ++r) { |
|
for (int c=0; c<image.cols; ++c) { |
|
|
|
float edgeWeight = edgeMask.at<uchar>(r,c) > 0 ? 0.3f : 1.0f; |
|
|
|
const Color& a = imageAdj(r,c); |
|
if (a == Color::ZERO) |
|
continue; |
|
Pixel8U& v = image.at<Pixel8U>(r,c); |
|
if (v.r == 0 && v.g == 0 && v.b == 0) |
|
continue; |
|
const Color col(RGB2YCBCR(Color(v))); |
|
// const Color acol(YCBCR2RGB(Color(col+a))); |
|
Color acol = YCBCR2RGB(Color(col + a * edgeWeight)); // apply the edge weight |
|
|
|
for (int p=0; p<3; ++p) { |
|
float val = acol[p]; |
|
val = std::min(std::max(val, 0.0f), 255.0f); |
|
v[p] = static_cast<uint8_t>(val + 0.5f); // 四舍五入 |
|
} |
|
} |
|
} |
|
/* |
|
for (int r=0; r<image.rows; ++r) { |
|
for (int c=0; c<image.cols; ++c) { |
|
const Color& a = imageAdj(r,c); |
|
if (a == Color::ZERO) continue; |
|
|
|
Pixel8U& v = image.at<Pixel8U>(r,c); |
|
const Color col(RGB2YCBCR(Color(v))); |
|
const Color acol(YCBCR2RGB(Color(col+a))); |
|
|
|
// clamp to the valid range (0-255) |
|
for (int p=0; p<3; ++p) { |
|
float val = acol[p]; |
|
val = std::min(std::max(val, 0.0f), 255.0f); // keep within the 0-255 range |
|
v[p] = static_cast<uint8_t>(val); |
|
} |
|
} |
|
} |
|
//*/ |
|
} |
|
|
|
} |
|
|
|
// New |
|
void MeshTexture::LocalSeamLeveling() |
|
{ |
|
ASSERT(!seamVertices.empty()); |
|
const unsigned numPatches(texturePatches.size()-1); |
|
|
|
// Create a boolean array to mark invalid vertices |
|
BoolArr vertexInvalid(vertices.size()); |
|
vertexInvalid.Memset(false); |
|
FOREACH(f, faces) { |
|
if (labelsInvalid[f] != NO_ID) { |
|
const Face& face = faces[f]; |
|
for (int v=0; v<3; ++v) |
|
vertexInvalid[face[v]] = true; |
|
} |
|
} |
|
|
|
// adjust texture patches locally, so that the border continues smoothly inside the patch |
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp parallel for schedule(dynamic) |
|
for (int i=0; i<(int)numPatches; ++i) { |
|
#else |
|
for (unsigned i=0; i<numPatches; ++i) { |
|
#endif |
|
const uint32_t idxPatch((uint32_t)i); |
|
const TexturePatch& texturePatch = texturePatches[idxPatch]; |
|
|
|
// Check if this texture patch contains any invalid vertices |
|
bool hasInvalidVertex = false; |
|
for (const FIndex idxFace: texturePatch.faces) { |
|
const Face& face = faces[idxFace]; |
|
for (int v=0; v<3; ++v) { |
|
if (vertexInvalid[face[v]]) { |
|
hasInvalidVertex = true; |
|
break; |
|
} |
|
} |
|
if (hasInvalidVertex) break; |
|
} |
|
|
|
// Set bias based on vertex validity: 0.1 if any vertex is invalid, else 1.0 |
|
const float bias = hasInvalidVertex ? 0.1f : 1.0f; |
|
// const float bias = 1.0f; |
|
|
|
// extract image |
|
const Image8U3& image0(images[texturePatch.label].image); |
|
Image32F3 image, imageOrg; |
|
image0(texturePatch.rect).convertTo(image, CV_32FC3, 1.0/255.0); |
|
image.copyTo(imageOrg); |
|
// render patch coverage |
|
Image8U mask(image.size()); { |
|
mask.memset(0); |
|
struct RasterMesh { |
|
Image8U& image; |
|
inline void operator()(const ImageRef& pt) { |
|
ASSERT(image.isInside(pt)); |
|
image(pt) = interior; |
|
} |
|
} data{mask}; |
|
for (const FIndex idxFace: texturePatch.faces) { |
|
// if (labelsInvalid[idxFace] != NO_ID) |
|
// continue; |
|
const TexCoord* tri = faceTexcoords.data()+idxFace*3; |
|
ColorMap::RasterizeTriangle(tri[0], tri[1], tri[2], data); |
|
} |
|
} |
|
// render the patch border meeting neighbor patches |
|
const Sampler sampler; |
|
const TexCoord offset(texturePatch.rect.tl()); |
|
for (const SeamVertex& seamVertex0: seamVertices) { |
|
if (seamVertex0.patches.size() < 2) |
|
continue; |
|
const uint32_t idxVertPatch0(seamVertex0.patches.Find(idxPatch)); |
|
if (idxVertPatch0 == SeamVertex::Patches::NO_INDEX) |
|
continue; |
|
const SeamVertex::Patch& patch0 = seamVertex0.patches[idxVertPatch0]; |
|
const TexCoord p0(patch0.proj-offset); |
|
// for each edge of this vertex belonging to this patch... |
|
for (const SeamVertex::Patch::Edge& edge0: patch0.edges) { |
|
// select the same edge leaving from the adjacent vertex |
|
const SeamVertex& seamVertex1 = seamVertices[edge0.idxSeamVertex]; |
|
const uint32_t idxVertPatch0Adj(seamVertex1.patches.Find(idxPatch)); |
|
ASSERT(idxVertPatch0Adj != SeamVertex::Patches::NO_INDEX); |
|
const SeamVertex::Patch& patch0Adj = seamVertex1.patches[idxVertPatch0Adj]; |
|
const TexCoord p0Adj(patch0Adj.proj-offset); |
|
// find the other patch sharing the same edge (edge with same adjacent vertex) |
|
FOREACH(idxVertPatch1, seamVertex0.patches) { |
|
if (idxVertPatch1 == idxVertPatch0) |
|
continue; |
|
const SeamVertex::Patch& patch1 = seamVertex0.patches[idxVertPatch1]; |
|
const uint32_t idxEdge1(patch1.edges.Find(edge0.idxSeamVertex)); |
|
if (idxEdge1 == SeamVertex::Patch::Edges::NO_INDEX) |
|
continue; |
|
const TexCoord& p1(patch1.proj); |
|
// select the same edge belonging to the second patch leaving from the adjacent vertex |
|
const uint32_t idxVertPatch1Adj(seamVertex1.patches.Find(patch1.idxPatch)); |
|
ASSERT(idxVertPatch1Adj != SeamVertex::Patches::NO_INDEX); |
|
const SeamVertex::Patch& patch1Adj = seamVertex1.patches[idxVertPatch1Adj]; |
|
const TexCoord& p1Adj(patch1Adj.proj); |
|
// this is an edge separating two (valid) patches; |
|
// draw it on this patch as the mean color of the two patches |
|
const Image8U3& image1(images[texturePatches[patch1.idxPatch].label].image); |
|
struct RasterPatch { |
|
Image32F3& image; |
|
Image8U& mask; |
|
const Image32F3& image0; |
|
const Image8U3& image1; |
|
const TexCoord p0, p0Dir; |
|
const TexCoord p1, p1Dir; |
|
const float length; |
|
const Sampler sampler; |
|
inline RasterPatch(Image32F3& _image, Image8U& _mask, const Image32F3& _image0, const Image8U3& _image1, |
|
const TexCoord& _p0, const TexCoord& _p0Adj, const TexCoord& _p1, const TexCoord& _p1Adj) |
|
: image(_image), mask(_mask), image0(_image0), image1(_image1), |
|
p0(_p0), p0Dir(_p0Adj-_p0), p1(_p1), p1Dir(_p1Adj-_p1), length((float)norm(p0Dir)), sampler() {} |
|
inline void operator()(const ImageRef& pt) { |
|
const float l((float)norm(TexCoord(pt)-p0)/length); |
|
// compute mean color |
|
const TexCoord samplePos0(p0 + p0Dir * l); |
|
const Color color0(image0.sample<Sampler,Color>(sampler, samplePos0)); |
|
const TexCoord samplePos1(p1 + p1Dir * l); |
|
const Color color1(image1.sample<Sampler,Color>(sampler, samplePos1)/255.f); |
|
image(pt) = Color((color0 + color1) * 0.5f); |
|
// set mask edge also |
|
mask(pt) = border; |
|
} |
|
} data(image, mask, imageOrg, image1, p0, p0Adj, p1, p1Adj); |
|
Image32F3::DrawLine(p0, p0Adj, data); |
|
// skip remaining patches, |
|
// as a manifold edge is shared by at most two faces (one in each patch), which we have already found |
|
break; |
|
} |
|
} |
|
// render the vertex at the patch border meeting neighbor patches |
|
AccumColor accumColor; |
|
// for each patch... |
|
for (const SeamVertex::Patch& patch: seamVertex0.patches) { |
|
// add its view to the vertex mean color |
|
const Image8U3& img(images[texturePatches[patch.idxPatch].label].image); |
|
accumColor.Add(img.sample<Sampler,Color>(sampler, patch.proj)/255.f, 1.f); |
|
} |
|
const ImageRef pt(ROUND2INT(patch0.proj-offset)); |
|
image(pt) = accumColor.Normalized(); |
|
mask(pt) = border; |
|
} |
|
// make sure the border is continuous and |
|
// keep only the exterior stripe of the given size |
|
ProcessMask(mask, 20); |
|
// compute texture patch blending |
|
PoissonBlending(imageOrg, image, mask, bias); |
|
// apply color correction to the patch image |
|
cv::Mat imagePatch(image0(texturePatch.rect)); |
|
for (int r=0; r<image.rows; ++r) { |
|
for (int c=0; c<image.cols; ++c) { |
|
if (mask(r,c) == empty) |
|
continue; |
|
const Color& a = image(r,c); |
|
Pixel8U& v = imagePatch.at<Pixel8U>(r,c); |
|
for (int p=0; p<3; ++p) |
|
v[p] = (uint8_t)CLAMP(ROUND2INT(a[p]*255.f), 0, 255); |
|
} |
|
} |
|
} |
|
} |
|
|
|
void MeshTexture::GenerateTexture2(bool bGlobalSeamLeveling, bool bLocalSeamLeveling, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, float fSharpnessWeight, int maxTextureSize, const SEACAVE::String& basename) |
|
{ |
|
// Bruce |
|
bGlobalSeamLeveling = false; |
|
bLocalSeamLeveling = false; |
|
// project patches in the corresponding view and compute texture-coordinates and bounding-box |
|
const int border(2); |
|
faceTexcoords.resize(faces.size()*3); |
|
faceTexindices.resize(faces.size()); |
|
#ifdef TEXOPT_USE_OPENMP |
|
// LOG_OUT() << "def TEXOPT_USE_OPENMP" << std::endl; |
|
const unsigned numPatches(texturePatches.size()-1); |
|
#pragma omp parallel for schedule(dynamic) |
|
for (int_t idx=0; idx<(int_t)numPatches; ++idx) { |
|
TexturePatch& texturePatch = texturePatches[(uint32_t)idx]; |
|
#else |
|
for (TexturePatch *pTexturePatch=texturePatches.Begin(), *pTexturePatchEnd=texturePatches.End()-1; pTexturePatch<pTexturePatchEnd; ++pTexturePatch) { |
|
TexturePatch& texturePatch = *pTexturePatch; |
|
#endif |
|
const Image& imageData = images[texturePatch.label]; |
|
AABB2f aabb(true); |
|
for (const FIndex idxFace: texturePatch.faces) { |
|
const Face& face = faces[idxFace]; |
|
TexCoord* texcoords = faceTexcoords.data()+idxFace*3; |
|
for (int i=0; i<3; ++i) { |
|
texcoords[i] = imageData.camera.ProjectPointP(vertices[face[i]]); |
|
ASSERT(imageData.image.isInsideWithBorder(texcoords[i], border)); |
|
aabb.InsertFull(texcoords[i]); |
|
} |
|
} |
|
// compute relative texture coordinates |
|
ASSERT(imageData.image.isInside(Point2f(aabb.ptMin))); |
|
ASSERT(imageData.image.isInside(Point2f(aabb.ptMax))); |
|
texturePatch.rect.x = FLOOR2INT(aabb.ptMin[0])-border; |
|
texturePatch.rect.y = FLOOR2INT(aabb.ptMin[1])-border; |
|
texturePatch.rect.width = CEIL2INT(aabb.ptMax[0]-aabb.ptMin[0])+border*2; |
|
texturePatch.rect.height = CEIL2INT(aabb.ptMax[1]-aabb.ptMin[1])+border*2; |
|
ASSERT(imageData.image.isInside(texturePatch.rect.tl())); |
|
ASSERT(imageData.image.isInside(texturePatch.rect.br())); |
|
const TexCoord offset(texturePatch.rect.tl()); |
|
for (const FIndex idxFace: texturePatch.faces) { |
|
TexCoord* texcoords = faceTexcoords.data()+idxFace*3; |
|
for (int v=0; v<3; ++v) |
|
texcoords[v] -= offset; |
|
} |
|
} |
|
{ |
|
// init last patch to point to a small uniform color patch |
|
TexturePatch& texturePatch = texturePatches.back(); |
|
const int sizePatch(border*2+1); |
|
texturePatch.rect = cv::Rect(0,0, sizePatch,sizePatch); |
|
for (const FIndex idxFace: texturePatch.faces) { |
|
TexCoord* texcoords = faceTexcoords.data()+idxFace*3; |
|
for (int i=0; i<3; ++i) |
|
texcoords[i] = TexCoord(0.5f, 0.5f); |
|
} |
|
} |
|
|
|
|
|
LOG_OUT() << "First loop completed" << std::endl; |
|
// perform seam leveling |
|
if (texturePatches.size() > 2 && (bGlobalSeamLeveling || bLocalSeamLeveling)) { |
|
// create seam vertices and edges |
|
CreateSeamVertices(); |
|
|
|
// perform global seam leveling |
|
if (bGlobalSeamLeveling) { |
|
TD_TIMER_STARTD(); |
|
GlobalSeamLeveling(); |
|
DEBUG_ULTIMATE("\tglobal seam leveling completed (%s)", TD_TIMER_GET_FMT().c_str()); |
|
} |
|
|
|
// perform local seam leveling |
|
if (bLocalSeamLeveling) { |
|
TD_TIMER_STARTD(); |
|
LocalSeamLeveling(); |
|
DEBUG_ULTIMATE("\tlocal seam leveling completed (%s)", TD_TIMER_GET_FMT().c_str()); |
|
} |
|
} |
|
|
|
// merge texture patches with overlapping rectangles |
|
for (unsigned i=0; i<texturePatches.size()-1; ++i) { |
|
TexturePatch& texturePatchBig = texturePatches[i]; |
|
for (unsigned j=1; j<texturePatches.size(); ++j) { |
|
if (i == j) |
|
continue; |
|
TexturePatch& texturePatchSmall = texturePatches[j]; |
|
if (texturePatchBig.label != texturePatchSmall.label) |
|
continue; |
|
if (!RectsBinPack::IsContainedIn(texturePatchSmall.rect, texturePatchBig.rect)) |
|
continue; |
|
// translate texture coordinates |
|
const TexCoord offset(texturePatchSmall.rect.tl()-texturePatchBig.rect.tl()); |
|
for (const FIndex idxFace: texturePatchSmall.faces) { |
|
TexCoord* texcoords = faceTexcoords.data()+idxFace*3; |
|
for (int v=0; v<3; ++v) |
|
texcoords[v] += offset; |
|
} |
|
// join faces lists |
|
texturePatchBig.faces.JoinRemove(texturePatchSmall.faces); |
|
// remove the small patch |
|
texturePatches.RemoveAtMove(j--); |
|
} |
|
} |
|
|
|
LOG_OUT() << "Second loop completed" << std::endl; |
|
// create texture |
|
{ |
|
// arrange texture patches to fit the smallest possible texture image |
|
// const unsigned minPatchSize = 20; |
|
RectsBinPack::RectWIdxArr unplacedRects(texturePatches.size()); |
|
FOREACH(i, texturePatches) { |
|
|
|
// LOG_OUT() << "Third loop completed" << std::endl; |
|
if (maxTextureSize > 0 && (texturePatches[i].rect.width > maxTextureSize || texturePatches[i].rect.height > maxTextureSize)) { |
|
DEBUG("error: a patch of size %u x %u does not fit the texture", texturePatches[i].rect.width, texturePatches[i].rect.height); |
|
ABORT("the maximum texture size chosen cannot fit a patch"); |
|
} |
|
unplacedRects[i] = {texturePatches[i].rect, i}; |
|
} |
|
LOG_OUT() << "unplacedRects loop completed" << std::endl; |
|
|
|
LOG_OUT() << "pack patches: one pack per texture file loop completed" << std::endl; |
|
// pack patches: one pack per texture file |
|
CLISTDEF2IDX(RectsBinPack::RectWIdxArr, TexIndex) placedRects; { |
|
// increase texture size till all patches fit |
|
const unsigned typeRectsBinPack(nRectPackingHeuristic/100); |
|
const unsigned typeSplit((nRectPackingHeuristic-typeRectsBinPack*100)/10); |
|
const unsigned typeHeuristic(nRectPackingHeuristic%10); |
|
int textureSize = 0; |
|
while (!unplacedRects.empty()) { |
|
TD_TIMER_STARTD(); |
|
if (textureSize == 0) { |
|
textureSize = RectsBinPack::ComputeTextureSize(unplacedRects, nTextureSizeMultiple); |
|
if (maxTextureSize > 0 && textureSize > maxTextureSize) |
|
textureSize = maxTextureSize; |
|
} |
|
|
|
RectsBinPack::RectWIdxArr newPlacedRects; |
|
switch (typeRectsBinPack) { |
|
case 0: { |
|
MaxRectsBinPack pack(textureSize, textureSize); |
|
newPlacedRects = pack.Insert(unplacedRects, (MaxRectsBinPack::FreeRectChoiceHeuristic)typeHeuristic); |
|
break; } |
|
case 1: { |
|
SkylineBinPack pack(textureSize, textureSize, typeSplit!=0); |
|
newPlacedRects = pack.Insert(unplacedRects, (SkylineBinPack::LevelChoiceHeuristic)typeHeuristic); |
|
break; } |
|
case 2: { |
|
GuillotineBinPack pack(textureSize, textureSize); |
|
newPlacedRects = pack.Insert(unplacedRects, false, (GuillotineBinPack::FreeRectChoiceHeuristic)typeHeuristic, (GuillotineBinPack::GuillotineSplitHeuristic)typeSplit); |
|
break; } |
|
default: |
|
ABORT("error: unknown RectsBinPack type"); |
|
} |
|
DEBUG_ULTIMATE("\tpacking texture completed: %u initial patches, %u placed patches, %u texture-size, %u textures (%s)", texturePatches.size(), newPlacedRects.size(), textureSize, placedRects.size(), TD_TIMER_GET_FMT().c_str()); |
|
|
|
if (textureSize == maxTextureSize || unplacedRects.empty()) { |
|
// create texture image |
|
placedRects.emplace_back(std::move(newPlacedRects)); |
|
texturesDiffuse.emplace_back(textureSize, textureSize).setTo(cv::Scalar(colEmpty.b, colEmpty.g, colEmpty.r)); |
|
textureSize = 0; |
|
} else { |
|
// try again with a bigger texture |
|
textureSize *= 2; |
|
if (maxTextureSize > 0) |
|
textureSize = std::min(textureSize, maxTextureSize); // cap the enlarged size at the maximum allowed texture size |
|
unplacedRects.JoinRemove(newPlacedRects); |
|
} |
|
} |
|
} |
|
LOG_OUT() << "Third loop completed" << std::endl; |
|
Mesh::FaceIdxArr emptyFaceIndexes; |
|
|
|
#ifdef TEXOPT_USE_OPENMP |
|
#pragma omp parallel for schedule(dynamic) |
|
for (int_t i=0; i<(int_t)placedRects.size(); ++i) { |
|
for (int_t j=0; j<(int_t)placedRects[(TexIndex)i].size(); ++j) { |
|
const TexIndex idxTexture((TexIndex)i); |
|
const uint32_t idxPlacedPatch((uint32_t)j); |
|
#else |
|
FOREACH(idxTexture, placedRects) { |
|
FOREACH(idxPlacedPatch, placedRects[idxTexture]) { |
|
#endif |
|
const TexturePatch& texturePatch = texturePatches[placedRects[idxTexture][idxPlacedPatch].patchIdx]; |
|
const RectsBinPack::Rect& rect = placedRects[idxTexture][idxPlacedPatch].rect; |
|
// copy patch image |
|
ASSERT((rect.width == texturePatch.rect.width && rect.height == texturePatch.rect.height) || |
|
(rect.height == texturePatch.rect.width && rect.width == texturePatch.rect.height)); |
|
int x(0), y(1); |
|
if (texturePatch.label != NO_ID) { |
|
const Image& imageData = images[texturePatch.label]; |
|
cv::Mat patch(imageData.image(texturePatch.rect)); |
|
if (rect.width != texturePatch.rect.width) { |
|
// flip patch and texture-coordinates |
|
patch = patch.t(); |
|
x = 1; y = 0; |
|
} |
|
patch.copyTo(texturesDiffuse[idxTexture](rect)); |
|
} |
|
else |
|
{ |
|
auto it = texturePatch.faces.begin(); |
|
while (it != texturePatch.faces.end()) |
|
{ |
|
emptyFaceIndexes.push_back(*it); |
|
++it; |
|
} |
|
} |
|
// compute final texture coordinates |
|
const TexCoord offset(rect.tl()); |
|
for (const FIndex idxFace: texturePatch.faces) { |
|
TexCoord* texcoords = faceTexcoords.data()+idxFace*3; |
|
faceTexindices[idxFace] = idxTexture; |
|
for (int v=0; v<3; ++v) { |
|
TexCoord& texcoord = texcoords[v]; |
|
texcoord = TexCoord( |
|
texcoord[x]+offset.x, |
|
texcoord[y]+offset.y |
|
); |
|
} |
|
} |
|
} |
|
} |
|
if (texturesDiffuse.size() == 1) |
|
faceTexindices.Release(); |
|
// apply some sharpening |
|
if (fSharpnessWeight > 0) { |
|
constexpr double sigma = 1.5; |
|
for (auto &textureDiffuse: texturesDiffuse) { |
|
Image8U3 blurryTextureDiffuse; |
|
cv::GaussianBlur(textureDiffuse, blurryTextureDiffuse, cv::Size(), sigma); |
|
cv::addWeighted(textureDiffuse, 1+fSharpnessWeight, blurryTextureDiffuse, -fSharpnessWeight, 0, textureDiffuse); |
|
} |
|
} |
|
LOG_OUT() << "Fourth loop completed" << std::endl; |
|
|
|
std::ofstream out(basename + "_empty_color_triangles.txt"); |
|
RFOREACHPTR(pIdxF, emptyFaceIndexes) { |
|
out << *pIdxF << "\n"; |
|
} |
|
out.close(); |
|
} |
|
} |
|
|
|
// save the occlusion/visibility data to file |
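|
// On-disk format: each map is written as plain text with one line per image, |
|
// "<image_name> <face_idx> <face_idx> ...", while face_visible_relative is written as one face |
|
// index per line; LoadVisibleFacesData below expects exactly this layout. |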
|
void Scene::SaveVisibleFacesData(std::map<std::string, std::unordered_set<int>>& visible_faces_map, |
|
std::unordered_set<int>& face_visible_relative, |
|
std::map<std::string, std::unordered_set<int>>& edge_faces_map, |
|
std::map<std::string, std::unordered_set<int>>& delete_edge_faces_map, |
|
std::string& basePath) { |
|
// save visible_faces_map |
|
std::ofstream mapFile(basePath + "_visible_faces_map.txt"); |
|
if (mapFile.is_open()) { |
|
for (const auto& entry : visible_faces_map) { |
|
mapFile << entry.first; |
|
for (int face : entry.second) { |
|
mapFile << " " << face; |
|
} |
|
mapFile << "\n"; |
|
} |
|
mapFile.close(); |
|
} |
|
|
|
// save face_visible_relative |
|
std::ofstream relativeFile(basePath + "_face_visible_relative.txt"); |
|
if (relativeFile.is_open()) { |
|
for (int face : face_visible_relative) { |
|
relativeFile << face << "\n"; |
|
} |
|
relativeFile.close(); |
|
} |
|
|
|
std::ofstream mapFile2(basePath + "_edge_faces_map.txt"); |
|
if (mapFile2.is_open()) { |
|
for (const auto& entry : edge_faces_map) { |
|
mapFile2 << entry.first; |
|
for (int face : entry.second) { |
|
mapFile2 << " " << face; |
|
} |
|
mapFile2 << "\n"; |
|
} |
|
mapFile2.close(); |
|
} |
|
|
|
std::ofstream mapFile3(basePath + "_delete_edge_faces_map.txt"); |
|
if (mapFile3.is_open()) { |
|
for (const auto& entry : delete_edge_faces_map) { |
|
mapFile3 << entry.first; |
|
for (int face : entry.second) { |
|
mapFile3 << " " << face; |
|
} |
|
mapFile3 << "\n"; |
|
} |
|
mapFile3.close(); |
|
} |
|
} |
|
|
|
// load the occlusion/visibility data from file |
|
bool Scene::LoadVisibleFacesData(std::map<std::string, std::unordered_set<int>>& visible_faces_map, |
|
std::unordered_set<int>& face_visible_relative, |
|
std::map<std::string, std::unordered_set<int>>& edge_faces_map, |
|
std::map<std::string, std::unordered_set<int>>& delete_edge_faces_map, |
|
std::string& basePath) { |
|
printf("LoadVisibleFacesData %s\n", basePath.c_str()); |
|
std::ifstream mapFile(basePath + "_visible_faces_map.txt"); |
|
if (!mapFile.is_open()) { |
|
return false; |
|
} |
|
|
|
std::string line; |
|
while (std::getline(mapFile, line)) { |
|
std::istringstream iss(line); |
|
std::string image_name; |
|
iss >> image_name; |
|
std::unordered_set<int> faces; |
|
int face_index; |
|
while (iss >> face_index) { |
|
faces.insert(face_index); |
|
} |
|
visible_faces_map[image_name] = faces; |
|
} |
|
mapFile.close(); |
|
|
|
std::ifstream relativeFile(basePath + "_face_visible_relative.txt"); |
|
if (!relativeFile.is_open()) { |
|
return false; |
|
} |
|
|
|
while (std::getline(relativeFile, line)) { |
|
int face_index = std::stoi(line); |
|
face_visible_relative.insert(face_index); |
|
} |
|
relativeFile.close(); |
|
|
|
std::ifstream mapFile2(basePath + "_edge_faces_map.txt"); |
|
if (!mapFile2.is_open()) { |
|
return false; |
|
} |
|
|
|
while (std::getline(mapFile2, line)) { |
|
std::istringstream iss(line); |
|
std::string image_name; |
|
iss >> image_name; |
|
std::unordered_set<int> faces; |
|
int face_index; |
|
while (iss >> face_index) { |
|
faces.insert(face_index); |
|
} |
|
edge_faces_map[image_name] = faces; |
|
} |
|
mapFile2.close(); |
|
|
|
std::ifstream mapFile3(basePath + "_delete_edge_faces_map.txt"); |
|
if (!mapFile3.is_open()) { |
|
return false; |
|
} |
|
|
|
while (std::getline(mapFile3, line)) { |
|
std::istringstream iss(line); |
|
std::string image_name; |
|
iss >> image_name; |
|
std::unordered_set<int> faces; |
|
int face_index; |
|
while (iss >> face_index) { |
|
faces.insert(face_index); |
|
} |
|
delete_edge_faces_map[image_name] = faces; |
|
} |
|
mapFile3.close(); |
|
|
|
return true; |
|
} |
|
|
|
// texture mesh |
|
// - minCommonCameras: generate texture patches using virtual faces composed of coplanar triangles sharing at least this number of views (0 - disabled, 3 - good value) |
|
// - fSharpnessWeight: sharpness weight to be applied on the texture (0 - disabled, 0.5 - good value) |
|
// - nIgnoreMaskLabel: label value to ignore in the image mask, stored in the MVS scene or next to each image with '.mask.png' extension (-1 - auto estimate mask for lens distortion, -2 - disabled) |
|
bool Scene::TextureMesh(unsigned nResolutionLevel, unsigned nMinResolution, unsigned minCommonCameras, float fOutlierThreshold, float fRatioDataSmoothness, |
|
bool bGlobalSeamLeveling, bool bLocalSeamLeveling, unsigned nTextureSizeMultiple, unsigned nRectPackingHeuristic, Pixel8U colEmpty, float fSharpnessWeight, |
|
int nIgnoreMaskLabel, int maxTextureSize, const IIndexArr& views, const SEACAVE::String& baseFileName, bool bOriginFaceview, |
|
const std::string& inputFileName, const std::string& meshFileName) |
|
{ |
|
if (!bOriginFaceview) |
|
{ |
|
// make sure the mesh topology has been computed |
|
if (mesh.faceFaces.empty()) { |
|
mesh.ListIncidenteFaces(); // compute vertex-face adjacency |
|
mesh.ListIncidenteFaceFaces(); // compute face-face adjacency |
|
} |
|
|
|
// make sure the face normals have been computed |
|
if (mesh.faceNormals.empty()) { |
|
mesh.ComputeNormalFaces(); // compute face normals |
|
} |
|
|
|
// make sure the vertex boundary information has been computed |
|
if (mesh.vertexBoundary.empty()) { |
|
mesh.ListBoundaryVertices(); // compute boundary vertices |
|
} |
|
|
|
Mesh::FaceIdxArr regionMap; |
|
this->SegmentMeshBasedOnCurvature(regionMap, 0.2f); // 曲率阈值设为0.2 |
|
} |
|
|
|
MeshTexture texture(*this, nResolutionLevel, nMinResolution); |
|
|
|
// printf("baseFileName=%s\n", baseFileName.c_str()); |
|
|
|
/* |
|
std::filesystem::path path(baseFileName.c_str()); |
|
std::string parentPath = path.parent_path().string(); // get the parent directory |
|
|
|
String altTexPath = String(parentPath) + "/mesh_material_0_map_Kd2.png"; |
|
printf("altTexPath=%s\n", altTexPath.c_str()); |
|
// load the alternative texture |
|
Image8U3 altTex; |
|
if (!altTex.Load(altTexPath)) { |
|
// if loading fails, emit a warning but do not abort the pipeline |
|
DEBUG_EXTRA("Warning: Failed to load alternative texture mesh_material_0_map_Kd2.png"); |
|
} else { |
|
texture.alternativeTexture = &altTex; |
|
} |
|
//*/ |
|
|
|
std::string id; |
|
|
|
// 1. find the position of the last '/' |
|
size_t last_slash = baseFileName.rfind('/'); |
|
if (last_slash == std::string::npos) { |
|
id = baseFileName; // no slash: return the original string |
|
} else { |
|
// 2. find the second-to-last '/' (before last_slash) |
|
size_t second_last_slash = baseFileName.rfind('/', last_slash - 1); |
|
if (second_last_slash == std::string::npos) { |
|
id = baseFileName.substr(0, last_slash); // fewer than two slashes: return everything before the first slash |
|
} else { |
|
// 3. find the third-to-last '/' (before second_last_slash) |
|
size_t third_last_slash = baseFileName.rfind('/', second_last_slash - 1); |
|
if (third_last_slash == std::string::npos) { |
|
id = baseFileName.substr(0, second_last_slash); // fewer than three slashes: return everything up to the second-to-last slash |
|
} else { |
|
// 4. extract the substring between the third-to-last and second-to-last slashes |
|
id = baseFileName.substr(third_last_slash + 1, second_last_slash - third_last_slash - 1); |
|
} |
|
} |
|
} |
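|
// With at least three '/' separators, `id` is the path component between the third-to-last and |
|
// second-to-last slashes; e.g. a hypothetical baseFileName of "/data/scene01/texture/mesh" |
|
// yields id = "scene01". |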
|
printf("id=%s\n", id.c_str()); |
|
|
|
#ifdef MASK_FACE_OCCLUSION |
|
//* |
|
fs::path p(baseFileName.c_str()); |
|
// 2. get the parent path (e.g., /path/to/data/scene) |
|
fs::path parent = p.parent_path(); |
|
// 4. convert to a string and append a path separator |
|
// (the / "" append handles this automatically, turning /path/to/data into /path/to/data/) |
|
std::string basePath = (parent / "").string(); |
|
//*/ |
|
/* |
|
std::string basePath = ""; |
|
size_t lastSlash = baseFileName.find_last_of('/'); |
|
size_t secondLastSlash = baseFileName.find_last_of('/', lastSlash - 1); |
|
if (secondLastSlash == std::string::npos) |
|
basePath = baseFileName; |
|
basePath = baseFileName.substr(0, secondLastSlash + 1); |
|
/*/ |
|
|
|
// printf("basePath=%s\n", basePath.c_str()); |
|
|
|
if (!LoadVisibleFacesData(visible_faces_map, face_visible_relative, edge_faces_map, delete_edge_faces_map, basePath)) |
|
{ |
|
printf("LoadVisibleFacesData error\n"); |
|
} |
|
#endif |
|
|
|
// assign the best view to each face |
|
{ |
|
TD_TIMER_STARTD(); |
|
if (bOriginFaceview) |
|
{ |
|
if (!texture.FaceViewSelection(minCommonCameras, fOutlierThreshold, fRatioDataSmoothness, nIgnoreMaskLabel, views)) |
|
return false; |
|
} |
|
else |
|
{ |
|
// if (!texture.FaceViewSelection3(minCommonCameras, fOutlierThreshold, fRatioDataSmoothness, nIgnoreMaskLabel, views)) |
|
// return false; |
|
|
|
// first pass: use virtual-face mapping and skip invalid faces |
|
// if (false) |
|
{ |
|
TD_TIMER_STARTD(); |
|
if (!texture.FaceViewSelection3(minCommonCameras, fOutlierThreshold, fRatioDataSmoothness, nIgnoreMaskLabel, views)) |
|
return false; |
|
DEBUG_EXTRA("First pass (virtual faces) completed: %u faces (%s)", mesh.faces.size(), TD_TIMER_GET_FMT().c_str()); |
|
} |
|
|
|
// second pass: handle invalid faces specifically (without virtual-face mapping) |
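|
// Note: this second pass is currently disabled via `if (false)`; when enabled it re-runs view |
|
// selection without virtual faces for the faces flagged as invalid and merges the resulting |
|
// texture patches and seam edges back into the main texture object. |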
|
if (false) |
|
{ |
|
TD_TIMER_STARTD(); |
|
|
|
// collect the indices of all invalid faces |
|
Mesh::FaceIdxArr invalidFaces; |
|
for (FIndex fid = 0; fid < mesh.faces.size(); ++fid) { |
|
if (texture.scene.mesh.invalidFaces.data.contains(fid)) { |
|
invalidFaces.push_back(fid); |
|
} |
|
} |
|
|
|
if (!invalidFaces.empty()) { |
|
// create a new texture processor dedicated to the invalid faces |
|
MeshTexture textureInvalid(*this, nResolutionLevel, nMinResolution); |
|
|
|
// process the invalid faces in non-virtual-face mode |
|
if (!textureInvalid.FaceViewSelection4(0, fOutlierThreshold, fRatioDataSmoothness, |
|
nIgnoreMaskLabel, views, &invalidFaces)) |
|
{ |
|
return false; |
|
} |
|
|
|
// merge the results of the two passes |
|
texture.texturePatches.Join(textureInvalid.texturePatches); |
|
texture.seamEdges.Join(textureInvalid.seamEdges); |
|
} |
|
|
|
DEBUG_EXTRA("Second pass (invalid faces) completed: %u faces (%s)", |
|
invalidFaces.size(), TD_TIMER_GET_FMT().c_str()); |
|
} |
|
|
|
} |
|
DEBUG_EXTRA("Assigning the best view to each face completed: %u faces (%s)", mesh.faces.size(), TD_TIMER_GET_FMT().c_str()); |
|
} |
|
|
|
// generate the texture image and atlas |
|
{ |
|
TD_TIMER_STARTD(); |
|
texture.GenerateTexture(bGlobalSeamLeveling, bLocalSeamLeveling, nTextureSizeMultiple, nRectPackingHeuristic, colEmpty, fSharpnessWeight, maxTextureSize, baseFileName, bOriginFaceview); |
|
DEBUG_EXTRA("Generating texture atlas and image completed: %u patches, %u image size, %u textures (%s)", texture.texturePatches.size(), mesh.texturesDiffuse[0].width(), mesh.texturesDiffuse.size(), TD_TIMER_GET_FMT().c_str()); |
|
} |
|
|
|
return true; |
|
} // TextureMesh |
|
|
|
std::string Scene::runPython(const std::string& command) { |
|
std::array<char, 128> buffer{}; |
|
std::string result; |
|
std::unique_ptr<FILE, decltype(&pclose)> pipe(popen(command.c_str(), "r"), pclose); |
|
if (!pipe) throw std::runtime_error("popen() failed!"); |
|
|
|
while (fgets(buffer.data(), buffer.size(), pipe.get()) != nullptr) { |
|
result += buffer.data(); |
|
} |
|
return result; |
|
} |
|
|
|
bool Scene::is_face_visible(const std::string& image_name, int face_index) { |
|
|
|
#ifndef MASK_FACE_OCCLUSION |
|
return true; |
|
#endif |
|
|
|
auto it = visible_faces_map.find(image_name); |
|
if (it != visible_faces_map.end()) { |
|
return it->second.find(face_index) != it->second.end(); |
|
} |
|
return false; |
|
} |
|
|
|
bool Scene::is_face_visible_relative(int face_index) |
|
{ |
|
#ifndef MASK_FACE_OCCLUSION |
|
return true; |
|
#endif |
|
|
|
return face_visible_relative.contains(face_index); |
|
} |
|
|
|
|
|
bool Scene::is_face_edge(const std::string& image_name, int face_index) { |
|
|
|
#ifndef MASK_FACE_OCCLUSION |
|
return true; |
|
#endif |
|
|
|
auto it = edge_faces_map.find(image_name); |
|
if (it != edge_faces_map.end()) { |
|
// printf("is_face_edge %s, %d, %d\n", image_name.c_str(), it->second.size(), face_index); |
|
|
|
for (auto it2 = it->second.begin(); it2 != it->second.end(); ++it2) { |
|
// std::cout << *it2 << " "; |
|
} |
|
// std::cout << std::endl; |
|
|
|
// if (it->second.find(face_index) != it->second.end()) |
|
// printf("find is_face_edge %s, %d, %d\n", image_name.c_str(), it->second.size(), face_index); |
|
return it->second.find(face_index) != it->second.end(); |
|
} |
|
return false; |
|
} |
|
|
|
bool Scene::is_face_delete_edge(const std::string& image_name, int face_index) { |
|
|
|
#ifndef MASK_FACE_OCCLUSION |
|
return true; |
|
#endif |
|
|
|
auto it = delete_edge_faces_map.find(image_name); |
|
if (it != delete_edge_faces_map.end()) { |
|
// printf("is_face_delete_edge %s, %d, %d\n", image_name.c_str(), it->second.size(), face_index); |
|
|
|
for (auto it2 = it->second.begin(); it2 != it->second.end(); ++it2) { |
|
// std::cout << *it2 << " "; |
|
} |
|
// std::cout << std::endl; |
|
|
|
// if (it->second.find(face_index) != it->second.end()) |
|
// printf("find is_face_delete_edge %s, %d, %d\n", image_name.c_str(), it->second.size(), face_index); |
|
return it->second.find(face_index) != it->second.end(); |
|
} |
|
return false; |
|
} |
|
|
|
void Scene::SegmentMeshBasedOnCurvature(Mesh::FaceIdxArr& regionMap, float curvatureThreshold) { |
|
// make sure the mesh data is valid |
|
if (mesh.faces.empty() || mesh.vertices.empty() || |
|
mesh.faceFaces.size() != mesh.faces.size() || |
|
mesh.faceNormals.size() != mesh.faces.size()) { |
|
regionMap.resize(mesh.faces.size()); |
|
regionMap.Memset(0); // default: assign everything to region 0 |
|
return; |
|
} |
|
|
|
regionMap.resize(mesh.faces.size()); |
|
const size_t numFaces = mesh.faces.size(); |
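|
// The loop below computes a simple per-face curvature proxy: the mean of 1 - abs(dot(n, nAdj)) |
|
// over the (up to three) adjacent faces, where n is the freshly computed unit normal of the |
|
// face and nAdj the stored normal of its neighbor; faces whose proxy exceeds |
|
// curvatureThreshold are assigned to region 1, all others to region 0. |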
|
|
|
for (size_t fid = 0; fid < numFaces; ++fid) { |
|
// check that the face index is valid |
|
if (fid >= mesh.faces.size()) continue; |
|
|
|
const Mesh::Face& f = mesh.faces[fid]; |
|
|
|
// check that the vertex indices are valid |
|
if (f[0] >= mesh.vertices.size() || |
|
f[1] >= mesh.vertices.size() || |
|
f[2] >= mesh.vertices.size()) { |
|
regionMap[fid] = 0; |
|
continue; |
|
} |
|
|
|
const Mesh::Vertex v0 = mesh.vertices[f[0]]; |
|
const Mesh::Vertex v1 = mesh.vertices[f[1]]; |
|
const Mesh::Vertex v2 = mesh.vertices[f[2]]; |
|
|
|
// compute the face normal |
|
const Mesh::Vertex edge1 = v1 - v0; |
|
const Mesh::Vertex edge2 = v2 - v0; |
|
|
|
Mesh::Normal normal; |
|
normal.x = edge1.y * edge2.z - edge1.z * edge2.y; |
|
normal.y = edge1.z * edge2.x - edge1.x * edge2.z; |
|
normal.z = edge1.x * edge2.y - edge1.y * edge2.x; |
|
|
|
const float length = std::sqrt(normal.x * normal.x + normal.y * normal.y + normal.z * normal.z); |
|
if (length > FLT_EPSILON) { |
|
normal.x /= length; |
|
normal.y /= length; |
|
normal.z /= length; |
|
} |
|
|
|
float curvature = 0.0f; |
|
int count = 0; |
|
|
|
// check that the face adjacency data is valid |
|
if (fid < mesh.faceFaces.size()) { |
|
for (int i = 0; i < 3; ++i) { |
|
const Mesh::FaceFaces::Type adjFaceIdx = mesh.faceFaces[fid][i]; |
|
if (adjFaceIdx == NO_ID) continue; |
|
|
|
// check that the adjacent face index is valid |
|
if (adjFaceIdx < mesh.faceNormals.size()) { |
|
const Mesh::Normal& adjNormal = mesh.faceNormals[adjFaceIdx]; |
|
const float dot = normal.x * adjNormal.x + normal.y * adjNormal.y + normal.z * adjNormal.z; |
|
curvature += 1.0f - std::abs(dot); |
|
count++; |
|
} |
|
} |
|
} |
|
|
|
if (count > 0) { |
|
curvature /= count; |
|
} |
|
|
|
regionMap[fid] = (curvature > curvatureThreshold) ? 1 : 0; |
|
} |
|
} |
|
|
|
/*----------------------------------------------------------------*/
|
|
|