Browse Source

处理头发丝和花之类遮挡问题 (handle occlusion caused by hair strands, flowers, and similar thin structures)

master
hesuicong 2 months ago
parent
commit
fcffff8083
  1. 874
      libs/MVS/SceneTexture.cpp
  2. 7
      libs/MVS/mask_face_occlusion.py

874
libs/MVS/SceneTexture.cpp

@@ -455,6 +455,7 @@ public:
void CreateVirtualFaces4(const FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, Mesh::FaceIdxArr& mapFaceToVirtualFace, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f);
void CreateVirtualFaces5(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const;
bool CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, std::vector<bool>& isVirtualFace, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const;
bool CreateVirtualFaces7(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, std::vector<bool>& isVirtualFace, unsigned minCommonCameras=2, float thMaxNormalDeviation=25.f) const;
IIndexArr SelectBestViews(const FaceDataArr& faceDatas, FIndex fid, unsigned minCommonCameras, float ratioAngleToQuality) const;
IIndexArr SelectBestView(const FaceDataArr& faceDatas, FIndex fid, unsigned minCommonCameras, float ratioAngleToQuality) const;
@@ -3656,8 +3657,830 @@ bool MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewA
return true;
}
/*
// Group mesh faces into "virtual faces" (patches of adjacent faces sharing common
// cameras) and compute one representative (view, quality, color) per patch, with
// extra handling for views occluded by thin geometry (hair strands, flowers, ...).
// Region growing uses a curvature-dependent normal-deviation threshold and
// visibility in the cameras selected for the patch center; per-view colors are
// then filtered with robust median/MAD statistics.
// Parameters mirror CreateVirtualFaces6; isVirtualFace is only written inside a
// currently-disabled code path. Always returns true.
// NOTE(review): this function contains many commented-out experiments and
// hard-coded tuning constants; bHasInvalidView is forced to true below, so the
// final else-branch is effectively dead — presumably intentional during tuning.
bool MeshTexture::CreateVirtualFaces7(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, std::vector<bool>& isVirtualFace, unsigned minCommonCameras, float thMaxNormalDeviation) const
{
	// lazily compute per-face curvature; used for the dynamic normal threshold
	if (meshCurvatures.empty()) {
		ComputeFaceCurvatures();
	}
	float thMaxColorDeviation = 130.0f; // max face-to-center color distance (the check itself is currently disabled below)
	const float ratioAngleToQuality(0.67f);
	const float cosMaxNormalDeviation(COS(FD2R(thMaxNormalDeviation)));
	Mesh::FaceIdxArr remainingFaces(faces.size());
	std::iota(remainingFaces.begin(), remainingFaces.end(), 0);
	std::vector<bool> selectedFaces(faces.size(), false);
	cQueue<FIndex, FIndex, 0> currentVirtualFaceQueue;
	std::unordered_set<FIndex> queuedFaces;
	// Precompute average color for each face
	Colors faceColors; // create an empty list
	faceColors.reserve(faces.size()); // pre-allocate space (if cList provides reserve and performance matters)
	for (size_t i = 0; i < faces.size(); ++i) {
		faceColors.push_back(Color::ZERO); // add the elements one by one
	}
	for (FIndex idxFace = 0; idxFace < faces.size(); ++idxFace) {
		const FaceDataArr& faceDatas = facesDatas[idxFace];
		if (faceDatas.empty()) continue;
		Color sumColor = Color::ZERO;
		for (const FaceData& fd : faceDatas) {
			sumColor += fd.color;
		}
		faceColors[idxFace] = sumColor / faceDatas.size();
	}
	do {
		// pick a random seed face as the center of the new virtual face
		const FIndex startPos = RAND() % remainingFaces.size();
		const FIndex virtualFaceCenterFaceID = remainingFaces[startPos];
		// dynamic normal-deviation threshold
		const float centerCurvature = meshCurvatures[virtualFaceCenterFaceID];
		const float dynamicThreshold = (centerCurvature < 0.2f) ? 15.0f : 8.0f; // curvature < 0.2 means a flat region
		const float dynamicCosTh = COS(FD2R(dynamicThreshold));
		ASSERT(currentVirtualFaceQueue.IsEmpty());
		const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID];
		const FaceDataArr& centerFaceDatas = facesDatas[virtualFaceCenterFaceID];
		// check whether the center face is seen by any invalid (occluded) view
		bool bHasInvalidView = false;
		int nInvalidViewCount = 0;
		int nTotalViewCount = 0;
		for (const FaceData& faceData : centerFaceDatas) {
			if (faceData.bInvalidFacesRelative) {
				bHasInvalidView = true;
				++nInvalidViewCount;
				// break;
			}
			++nTotalViewCount;
		}
		// (quality, color) pairs of the center face's views: all views, views to be
		// sorted by luminance closeness, and valid-only views
		std::vector<std::pair<float, Color>> sortedViews;
		std::vector<std::pair<float, Color>> sortedLuminViews;
		std::vector<std::pair<float, Color>> validViews;
		sortedViews.reserve(centerFaceDatas.size());
		for (const FaceData& fd : centerFaceDatas) {
			if (fd.bInvalidFacesRelative)
			{
				// invalidView = fd.idxView;
				// invalidQuality = fd.quality;
				sortedViews.emplace_back(fd.quality, fd.color);
				sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color);
			}
			else
			{
				sortedViews.emplace_back(fd.quality, fd.color);
				sortedLuminViews.emplace_back(MeshTexture::GetLuminance(fd.color), fd.color);
				validViews.emplace_back(fd.quality, fd.color);
			}
		}
		// sort by quality, best first
		std::sort(sortedViews.begin(), sortedViews.end(),
			[](const auto& a, const auto& b) { return a.first > b.first; });
		std::sort(validViews.begin(), validViews.end(),
			[](const auto& a, const auto& b) { return a.first > b.first; });
		int nSize = sortedViews.size();
		// int nSize = (sortedViews.size()>1) ? 1 : sortedViews.size();
		// compute the initial averages
		float totalQuality = 0.0f;
		Color totalColor(0,0,0);
		for (int n = 0; n < nSize; ++n) {
			totalQuality += sortedViews[n].first;
			totalColor += sortedViews[n].second;
		}
		const float avgQuality = totalQuality / nSize;
		const Color avgColor = totalColor / nSize;
		float totalLuminance = MeshTexture::GetLuminance(totalColor);
		float avgLuminance = totalLuminance / nSize;
		// sort views by closeness of their luminance to the average luminance
		std::sort(sortedLuminViews.begin(), sortedLuminViews.end(),
			[avgLuminance](const auto& a, const auto& b) {
				float luminDistA = cv::norm(avgLuminance - a.first);
				float luminDistB = cv::norm(avgLuminance - b.first);
				return luminDistA < luminDistB; });
		// select the common cameras
		Mesh::FaceIdxArr virtualFace;
		FaceDataArr virtualFaceDatas;
		if (centerFaceDatas.empty()) {
			// face not seen by any camera: it forms a virtual face on its own
			virtualFace.emplace_back(virtualFaceCenterFaceID);
			selectedFaces[virtualFaceCenterFaceID] = true;
			const auto posToErase = remainingFaces.FindFirst(virtualFaceCenterFaceID);
			ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX);
			remainingFaces.RemoveAtMove(posToErase);
		} else {
			IIndexArr selectedCams = SelectBestViews(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality);
			/*
			// fetch the normal of the center face (note the variable is named normalCenter, not centerNormal)
			const Normal& normalCenter = scene.mesh.faceNormals[virtualFaceCenterFaceID];
			// filter selectedCams: keep only the views whose angle is below 30 degrees
			IIndexArr filteredCams; // stores the filtered view indices
			for (IIndex idxView : selectedCams) {
				const Image& imageData = images[idxView];
				// compute the camera viewing direction in world coordinates (camera optical axis)
				const RMatrix& R = imageData.camera.R; // adjust to the actual type of R; could be Matrix3x3f or other
				// the forward vector (0,0,-1) in camera-local coordinates
				Point3f localForward(0.0f, 0.0f, -1.0f);
				// manual matrix multiplication: cameraForward = R * localForward
				Point3f cameraForward;
				cameraForward.x = R(0,0) * localForward.x + R(0,1) * localForward.y + R(0,2) * localForward.z;
				cameraForward.y = R(1,0) * localForward.x + R(1,1) * localForward.y + R(1,2) * localForward.z;
				cameraForward.z = R(2,0) * localForward.x + R(2,1) * localForward.y + R(2,2) * localForward.z;
				// normalize cameraForward by hand (Point3f may not have a normalize() member)
				float norm = std::sqrt(cameraForward.x * cameraForward.x +
					cameraForward.y * cameraForward.y +
					cameraForward.z * cameraForward.z);
				if (norm > 0.0f) {
					cameraForward.x /= norm;
					cameraForward.y /= norm;
					cameraForward.z /= norm;
				} else {
					// handle the zero-vector case with a default value
					cameraForward = Point3f(0, 0, -1);
				}
				// compute the cosine of the angle - use the already declared normalCenter
				// assume Normal converts implicitly to Point3f, or convert explicitly
				Point3f normalPoint(normalCenter.x, normalCenter.y, normalCenter.z); // explicit conversion example
				float cosAngle = cameraForward.dot(normalPoint); // use the proper variable normalPoint (converted from normalCenter)
				float angleDeg = std::acos(cosAngle) * 180.0f / M_PI; // convert radians to degrees
				std::string strPath = imageData.name;
				size_t lastSlash = strPath.find_last_of("/\\");
				if (lastSlash == std::string::npos) lastSlash = 0; // no separator: start from the beginning
				else lastSlash++; // skip the separator
				// locate the extension separator '.'
				size_t lastDot = strPath.find_last_of('.');
				if (lastDot == std::string::npos) lastDot = strPath.size(); // no extension: take up to the end
				// extract the file name (without path and extension)
				std::string strName = strPath.substr(lastSlash, lastDot - lastSlash);
				// printf("CreateVirtualFace %s, %d\n", strName.c_str(), virtualFaceCenterFaceID);
				if (!scene.is_face_delete_edge(strName, virtualFaceCenterFaceID))
				{
					if (scene.is_face_edge(strName, virtualFaceCenterFaceID))
					{
						// printf("CreateVirtualFace %s, %d, %f\n", strName.c_str(), virtualFaceCenterFaceID, angleLimit);
						if (angleDeg <= 45.0f)
						{
							filteredCams.push_back(idxView);
						}
					}
					else
					{
						filteredCams.push_back(idxView);
					}
				}
			}
			// make sure selectedCams is non-const so it can be assigned,
			// e.g. declared as: IIndexArr selectedCams = ...; (not const IIndexArr)
			if (filteredCams.empty()) {
				// handle the case where every view got filtered out...
				// DEBUG_EXTRA("Warning: All views filtered for virtual face due to angle condition.");
				// selectedCams = SelectBestView(centerFaceDatas, virtualFaceCenterFaceID, minCommonCameras, ratioAngleToQuality);
				selectedCams = filteredCams;
				isVirtualFace[virtualFaceCenterFaceID] = false;
			} else {
				selectedCams = filteredCams;
				isVirtualFace[virtualFaceCenterFaceID] = true;
			}
			//*/
			// region-grow the virtual face starting from the center face
			currentVirtualFaceQueue.AddTail(virtualFaceCenterFaceID);
			queuedFaces.clear();
			do {
				const FIndex currentFaceId = currentVirtualFaceQueue.GetHead();
				currentVirtualFaceQueue.PopHead();
				// check for condition to add in current virtual face
				// normal angle smaller than thMaxNormalDeviation degrees
				const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId];
				const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr()));
				// if (cosFaceToCenter < cosMaxNormalDeviation)
				// continue;
				if (cosFaceToCenter < dynamicCosTh) // use the dynamic threshold
					continue;
				// check if current face is seen by all cameras in selectedCams
				ASSERT(!selectedCams.empty());
				if (!IsFaceVisible(facesDatas[currentFaceId], selectedCams))
					continue;
				// Check color similarity (the rejection itself is currently disabled)
				const Color& centerColor = faceColors[virtualFaceCenterFaceID];
				const Color& currentColor = faceColors[currentFaceId];
				// if (cv::norm(centerColor) > 1e-5 && cv::norm(currentColor) > 1e-5)
				{
					float colorDistance = cv::norm(centerColor - currentColor);
					// printf("1colorDistance=%f\n", colorDistance);
					if (colorDistance > thMaxColorDeviation) {
						// printf("2colorDistance=%f\n", colorDistance);
						// continue; // Skip if color difference is too large
					}
				}
				/*
				// #ifdef TEXOPT_USE_OPENMP
				// #pragma omp critical
				// #endif
				// std::lock_guard<std::mutex> lock(*scene.mesh.invalidFaces.mtx);
				// if (scene.mesh.invalidFaces.data.find(currentFaceId) != scene.mesh.invalidFaces.data.end()) {
				// continue; // skip invalid faces
				// }
				// check that the face is validly seen by all selected cameras
				if (!IsFaceVisibleAndValid(facesDatas[currentFaceId], selectedCams)) {
					continue;
				}
				//*/
				// remove it from remaining faces and add it to the virtual face
				{
					const auto posToErase = remainingFaces.FindFirst(currentFaceId);
					ASSERT(posToErase != Mesh::FaceIdxArr::NO_INDEX);
					remainingFaces.RemoveAtMove(posToErase);
					selectedFaces[currentFaceId] = true;
					virtualFace.push_back(currentFaceId);
				}
				// add all new neighbors to the queue
				const Mesh::FaceFaces& ffaces = faceFaces[currentFaceId];
				for (int i = 0; i < 3; ++i) {
					const FIndex fIdx = ffaces[i];
					if (fIdx == NO_ID)
						continue;
					if (!selectedFaces[fIdx] && queuedFaces.find(fIdx) == queuedFaces.end()) {
						currentVirtualFaceQueue.AddTail(fIdx);
						queuedFaces.emplace(fIdx);
					}
				}
			} while (!currentVirtualFaceQueue.IsEmpty());
			/*
			if (selectedCams.empty()) {
				const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color;
				const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality;
				FaceData& virtualFaceData = virtualFaceDatas.emplace_back();
				virtualFaceData.color = medianColor;
				virtualFaceData.quality = medianQuality;
			}
			*/
			// compute virtual face quality and create virtual face
			for (IIndex idxView: selectedCams) {
				FaceData& virtualFaceData = virtualFaceDatas.emplace_back();
				virtualFaceData.quality = 0;
				virtualFaceData.idxView = idxView;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
				virtualFaceData.color = Point3f::ZERO;
#endif
				int invalidQuality = 0;
				Color invalidColor = Point3f::ZERO;
				unsigned processedFaces(0);
				bool bInvalidFacesRelative = false;
				int invalidCount = 0;
				// NOTE(review): the entire accumulation over the member faces below is
				// commented out, so processedFaces stays 0 — see the ASSERT further down.
				// Also: non-const ref iteration over a const FaceDataArr — relies on
				// cList's const iteration semantics; verify this compiles as intended.
				for (FIndex fid : virtualFace) {
					const FaceDataArr& faceDatas = facesDatas[fid];
					for (FaceData& faceData: faceDatas) {
						/*
						// if (faceData.idxView == idxView) {
						if (faceData.idxView == idxView && !faceData.bInvalidFacesRelative) {
							virtualFaceData.quality += faceData.quality;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
							virtualFaceData.color += faceData.color;
#endif
							++processedFaces;
							if (faceData.bInvalidFacesRelative)
								++invalidCount;
							break;
						}
						//*/
						/*
						int nViewCount = 0;
						if (faceData.idxView == idxView) {
							for (const FaceData& fd : faceDatas) {
								if (fd.idxView != idxView) {
									++nViewCount;
								}
							}
							if ((nViewCount<=10) || !faceData.bInvalidFacesRelative) {
								virtualFaceData.quality += faceData.quality;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
								virtualFaceData.color += faceData.color;
#endif
								++processedFaces;
								// break;
							}
						}
						//*/
						/*
						int nViewCount = 0;
						if (faceData.idxView == idxView)
						{
							for (const FaceData& fd : faceDatas)
							{
								if ( faceData.bInvalidFacesRelative)
								{
									++nViewCount;
								}
							}
							// if (faceData.bInvalidFacesRelative)
							if (bHasInvalidView)
							{
								// invalidQuality += faceData.quality;
								// #if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
								// invalidColor += faceData.color;
								// #endif
								++processedFaces;
							}
							else
							{
								// virtualFaceData.quality += faceData.quality;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
								// virtualFaceData.color += faceData.color;
#endif
								++processedFaces;
								// break;
							}
						}
						//*/
					}
				}
				float maxLuminance = 120.0f;
				float minLuminance = 90.0f;
				int validViewsSize = validViews.size();
				bHasInvalidView = true; // NOTE(review): forces the robust-statistics branch below for every view
				if (bHasInvalidView)
				{
					// use robust statistics to get the central color and luminance
					const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color;
					const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality;
					const float medianLuminance = ComputeMedianLuminance(sortedViews);
					// compute the median absolute deviation (MAD) of color and luminance as deviation thresholds
					const float colorMAD = ComputeColorMAD(sortedViews, medianColor);
					const float luminanceMAD = ComputeLuminanceMAD(sortedViews, medianLuminance);
					// MAD-based dynamic thresholds (3x MAD is the usual statistical outlier bound,
					// though 0.01x is used here)
					const float maxColorDeviation = 0.01f * colorMAD;
					const float maxLuminanceDeviation = 0.01f * luminanceMAD;
					std::vector<int> validIndices;
					for (int n = 0; n < sortedViews.size(); ++n) {
						const Color& viewColor = sortedViews[n].second;
						const float viewLuminance = MeshTexture::GetLuminance(viewColor);
						const float colorDistance = cv::norm(viewColor - medianColor);
						const float luminanceDistance = std::abs(viewLuminance - medianLuminance);
						if (colorDistance <= maxColorDeviation &&
							luminanceDistance <= maxLuminanceDeviation)
						{
							validIndices.push_back(n);
						}
						else
						{
							// NOTE(review): the queue was drained by the region-growing loop
							// above, so GetHead() on an empty queue here looks suspicious — verify.
							const FIndex currentFaceId = currentVirtualFaceQueue.GetHead();
							const Normal& faceNormal = scene.mesh.faceNormals[currentFaceId];
							const float cosFaceToCenter(ComputeAngleN(normalCenter.ptr(), faceNormal.ptr()));
							bool bColorSimilarity = true;
							// Check color similarity
							const Color& centerColor = faceColors[virtualFaceCenterFaceID];
							const Color& currentColor = faceColors[currentFaceId];
							float colorDistance = cv::norm(centerColor - currentColor);
							// printf("1colorDistance=%f\n", colorDistance);
							if (colorDistance > thMaxColorDeviation) {
								// printf("2colorDistance=%f\n", colorDistance);
								bColorSimilarity = false;
							}
							// NOTE(review): both branches below push n unconditionally,
							// so every view ends up accepted.
							// if ((cosFaceToCenter<dynamicCosTh) || !IsFaceVisible(facesDatas[currentFaceId], selectedCams))
							if (cosFaceToCenter<dynamicCosTh)
							{
								if (nInvalidViewCount<=2)
									validIndices.push_back(n);
								else
								{
									// if ((colorDistance <= 350.0f))
									validIndices.push_back(n);
								}
							}
							else
							{
								if (nInvalidViewCount<=2)
									validIndices.push_back(n);
								else
								{
									// if (bColorSimilarity)
									validIndices.push_back(n);
								}
							}
						}
					}
					// fallback passes (currently no-ops: the push_back calls are commented out)
					if (validIndices.empty()) {
						for (int n = 0; n < sortedViews.size(); ++n) {
							const Color& viewColor = sortedViews[n].second;
							const float viewLuminance = MeshTexture::GetLuminance(viewColor);
							const float colorDistance = cv::norm(viewColor - medianColor);
							const float luminanceDistance = std::abs(viewLuminance - medianLuminance);
							if (colorDistance <= maxColorDeviation)
							{
								// validIndices.push_back(n);
							}
						}
					}
					if (validIndices.empty()) {
						for (int n = 0; n < sortedViews.size(); ++n) {
							const Color& viewColor = sortedViews[n].second;
							const float viewLuminance = MeshTexture::GetLuminance(viewColor);
							const float colorDistance = cv::norm(viewColor - medianColor);
							const float luminanceDistance = std::abs(viewLuminance - medianLuminance);
							if (luminanceDistance <= maxLuminanceDeviation)
							{
								// validIndices.push_back(n);
							}
						}
					}
					/*
					if (validIndices.empty()) {
						for (int n = 0; n < sortedViews.size(); ++n) {
							const Color& viewColor = sortedViews[n].second;
							const float viewLuminance = MeshTexture::GetLuminance(viewColor);
							const float colorDistance = cv::norm(viewColor - medianColor);
							const float luminanceDistance = std::abs(viewLuminance - medianLuminance);
							if (luminanceDistance <= maxLuminanceDeviation)
							{
								validIndices.push_back(n);
							}
						}
					}
					if (validIndices.empty()) {
						for (int n = 0; n < sortedViews.size(); ++n) {
							const Color& viewColor = sortedViews[n].second;
							const float viewLuminance = MeshTexture::GetLuminance(viewColor);
							const float colorDistance = cv::norm(viewColor - medianColor);
							const float luminanceDistance = std::abs(viewLuminance - medianLuminance);
							if (colorDistance <= maxColorDeviation)
							{
								validIndices.push_back(n);
							}
						}
					}
					//*/
					if (validViewsSize<=0&&false) // dead branch (&& false)
					{
						//*
						// int nSize = sortedViews.size(); // (sortedViews.size() > 3) ? 3 : sortedViews.size();
						// // compute the initial averages
						// float totalQuality = 0.0f;
						// Color totalColor(0,0,0);
						// for (int n = 0; n < nSize; ++n) {
						// totalQuality += sortedViews[n].first;
						// totalColor += sortedViews[n].second;
						// }
						// const float avgQuality = totalQuality / nSize;
						// const Color avgColor = totalColor / nSize;
						// filter out the views that deviate too much
						// std::vector<int> validIndices;
						float maxColorDeviation = 0.01f; // color deviation threshold
						float maxLuminanceDeviation = 0.01f;
						for (int n = 0; n < nSize; ++n) {
							const Color& viewColor = sortedViews[n].second;
							float colorDistance = cv::norm(avgColor - viewColor);
							// printf("colorDistance=%f\n", colorDistance);
							float viewLuminance = MeshTexture::GetLuminance(viewColor);
							float luminanceDistance = cv::norm(avgLuminance - viewLuminance);
							// printf("viewLuminance=%f\n", viewLuminance);
							// if ((colorDistance<=maxColorDeviation)&&
							// (viewLuminance<=maxLuminance)&&
							// (viewLuminance>=minLuminance)){
							if ((colorDistance <= maxColorDeviation) &&
								(luminanceDistance <= maxLuminanceDeviation)) {
								// validIndices.push_back(n);
							}
						}
						//*
						if (validIndices.empty()) {
							for (int n = 0; n < nSize; ++n) {
								const Color& viewColor = sortedViews[n].second;
								float viewLuminance = MeshTexture::GetLuminance(viewColor);
								float luminanceDistance = cv::norm(avgLuminance - viewLuminance);
								if (luminanceDistance <= maxLuminanceDeviation){
									// validIndices.push_back(n);
								}
							}
						}
						if (validIndices.empty()) {
							for (int n = 0; n < nSize; ++n) {
								const Color& viewColor = sortedViews[n].second;
								float colorDistance = cv::norm(avgColor - viewColor);
								if (colorDistance<=maxColorDeviation){
									// validIndices.push_back(n);
								}
							}
						}
						//*/
						/*
						float maxColorDeviation2 = 0.05f;
						if (validIndices.empty()) {
							for (int n = 0; n < nSize; ++n) {
								const Color& viewColor = sortedViews[n].second;
								float colorDistance = cv::norm(avgColor - viewColor);
								if (colorDistance <= maxColorDeviation2) {
									validIndices.push_back(n);
								}
							}
						}
						//*/
						/*
						float totalLuminance = MeshTexture::GetLuminance(totalColor);
						float avgLuminance = totalLuminance / nSize;
						for (int n = 0; n < nSize; ++n) {
							const Color& viewColor = sortedViews[n].second;
							float viewLuminance = MeshTexture::GetLuminance(viewColor);
							float luminanceDistance = cv::norm(avgLuminance - viewLuminance);
							if (luminanceDistance <= maxLuminanceDeviation) {
								validIndices.push_back(n);
							}
						}
						//*/
						// if every view got excluded, keep the original average
						if (validIndices.empty()) {
							// virtualFaceData.quality = avgQuality;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
							// virtualFaceData.color = avgColor;
#endif
							// virtualFaceData.quality = avgQuality;
							// virtualFaceData.color = sortedLuminViews[0].second;
							virtualFaceData.quality = medianQuality;
							virtualFaceData.color = medianColor;
						}
						else {
							// recompute the average from the filtered views
							float totalQuality2 = 0.0f;
							Color totalColor2 = Color(0,0,0);
							for (int idx : validIndices) {
								const Color& viewColor = sortedViews[idx].second;
								float colorDistance = cv::norm(avgColor - viewColor);
								float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation);
								totalQuality2 += sortedViews[idx].first;
								totalColor2 += sortedViews[idx].second * weight;
							}
							virtualFaceData.quality = totalQuality2 / validIndices.size();
							virtualFaceData.color = totalColor2 / validIndices.size();
						}
						//*/
					}
					else if (validViewsSize>0&&validViewsSize<=2&&false) // dead branch (&& false)
					{
						/*
						virtualFaceData.quality = 0;
						virtualFaceData.color = Point3f::ZERO;
						// int nSize = (validViews.size()>1) ? 1 : validViews.size();
						int nSize = validViews.size();
						for (int n=0; n<nSize; ++n)
						{
							virtualFaceData.quality += validViews[n].first;
							virtualFaceData.color += validViews[n].second;
						}
						virtualFaceData.quality /= nSize;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
						virtualFaceData.color /= nSize;
#endif
						*/
						//*
						int nSize = validViews.size(); // (validViews.size() > 3) ? 3 : validViews.size();
						// compute the initial averages
						float totalQuality2 = 0.0f;
						Color totalColor2(0,0,0);
						for (int n = 0; n < nSize; ++n) {
							totalQuality2 += validViews[n].first;
							totalColor2 += validViews[n].second;
						}
						const float avgQuality2 = totalQuality2 / nSize;
						const Color avgColor2 = totalColor2 / nSize;
						// filter out the views that deviate too much
						// std::vector<int> validIndices;
						float maxColorDeviation = 0.01f; // color deviation threshold
						for (int n = 0; n < nSize; ++n) {
							const Color& viewColor = validViews[n].second;
							float colorDistance = cv::norm(avgColor2 - viewColor);
							// printf("colorDistance=%f\n", colorDistance);
							float viewLuminance = MeshTexture::GetLuminance(viewColor);
							if ((colorDistance<=maxColorDeviation)&&
								(viewLuminance<=120.0f)){
								// if (colorDistance <= maxColorDeviation) {
								// validIndices.push_back(n);
							}
						}
						/*
						// float totalLuminance = MeshTexture::GetLuminance(totalColor);
						// float avgLuminance = totalLuminance / nSize;
						float maxLuminanceDeviation = 0.01f;
						for (int n = 0; n < nSize; ++n) {
							const Color& viewColor = sortedViews[n].second;
							float viewLuminance = MeshTexture::GetLuminance(viewColor);
							float luminanceDistance = cv::norm(avgLuminance - viewLuminance);
							// printf("luminanceDistance=%f\n", luminanceDistance);
							if (luminanceDistance <= maxLuminanceDeviation) {
								// validIndices.push_back(n);
							}
						}
						//*/
						// if every view got excluded, keep the original average
						if (validIndices.empty()) {
							// virtualFaceData.quality = avgQuality;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
							// virtualFaceData.color = avgColor;
#endif
							virtualFaceData.quality = medianQuality;
							virtualFaceData.color = medianColor;
							// virtualFaceData.color = sortedLuminViews[0].second;
							/*
							for (int n = 0; n < nSize; ++n) {
								float lumin = sortedLuminViews[n].first;
								if (lumin>=minLuminance&&lumin<=maxLuminance)
								{
									// virtualFaceData.quality = avgQuality;
									// virtualFaceData.color = sortedLuminViews[0].second;
									break;
								}
							}
							//*/
						}
						else {
							// recompute the average from the filtered views
							float totalQuality2 = 0.0f;
							Color totalColor2 = Color(0,0,0);
							for (int idx : validIndices) {
								const Color& viewColor = sortedViews[idx].second;
								float colorDistance = cv::norm(avgColor - viewColor);
								float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation);
								totalQuality2 += validViews[idx].first;
								totalColor2 += validViews[idx].second * weight;
							}
							virtualFaceData.quality = totalQuality2 / validIndices.size();
							virtualFaceData.color = totalColor2 / validIndices.size();
						}
						//*/
					}
					else
					{
						//*
						// NOTE(review): both conditions above contain "&& false", so this
						// branch always runs; processedFaces is never incremented (the
						// accumulation loop is fully commented out), so this ASSERT should
						// fire in debug builds — verify intent.
						ASSERT(processedFaces > 0);
						// virtualFaceData.quality /= processedFaces;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
						// virtualFaceData.color /= processedFaces;
#endif
						virtualFaceData.quality = 0;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
						virtualFaceData.color = Point3f::ZERO;
#endif
						//*/
						/*
						// if every view got excluded, keep the original average
						if (validIndices.empty() || validViews.size() <= 0) {
							// virtualFaceData.quality = avgQuality;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
							// virtualFaceData.color = avgColor;
#endif
							// virtualFaceData.quality = medianQuality;
							// virtualFaceData.color = medianColor;
							virtualFaceData.quality /= processedFaces;
#if TEXOPT_FACEOUTLIER != TEXOPT_FACEOUTLIER_NA
							virtualFaceData.color /= processedFaces;
#endif
						}
						else {
							// recompute the average from the filtered views
							float totalQuality2 = 0.0f;
							Color totalColor2 = Color(0,0,0);
							for (int idx : validIndices) {
								const Color& viewColor = sortedViews[idx].second;
								float colorDistance = cv::norm(avgColor - viewColor);
								float weight = 1.0f / (1.0f + colorDistance/maxColorDeviation);
								totalQuality2 += validViews[idx].first;
								totalColor2 += validViews[idx].second * weight;
							}
							virtualFaceData.quality = totalQuality2 / validIndices.size();
							virtualFaceData.color = totalColor2 / validIndices.size();
						}
						//*/
					}
				}
				else
				{
					// NOTE(review): unreachable — bHasInvalidView is forced to true above.
					// use robust statistics to get the central color and luminance
					const Color medianColor = ComputeMedianColorAndQuality(sortedViews).color;
					const float medianQuality = ComputeMedianColorAndQuality(sortedViews).quality;
					const float medianLuminance = ComputeMedianLuminance(sortedViews);
					// compute the median absolute deviation (MAD) of color and luminance as deviation thresholds
					const float colorMAD = ComputeColorMAD(sortedViews, medianColor);
					const float luminanceMAD = ComputeLuminanceMAD(sortedViews, medianLuminance);
					// MAD-based dynamic thresholds (3x MAD is the usual statistical outlier bound)
					const float maxColorDeviation = 0.01f * colorMAD;
					const float maxLuminanceDeviation = 0.01f * luminanceMAD;
					std::vector<int> validIndices;
					for (int n = 0; n < sortedViews.size(); ++n) {
						const Color& viewColor = sortedViews[n].second;
						const float viewLuminance = MeshTexture::GetLuminance(viewColor);
						const float colorDistance = cv::norm(viewColor - medianColor);
						const float luminanceDistance = std::abs(viewLuminance - medianLuminance);
						// if (colorDistance <= maxColorDeviation &&
						// luminanceDistance <= maxLuminanceDeviation)
						{
							validIndices.push_back(n);
						}
					}
					if (validIndices.empty()) {
						virtualFaceData.quality = medianQuality;
						virtualFaceData.color = medianColor;
					}
					else {
						// recompute the average from the filtered views
						float totalQuality2 = 0.0f;
						Color totalColor2 = Color(0,0,0);
						for (int idx : validIndices) {
							totalQuality2 += validViews[idx].first;
							totalColor2 += validViews[idx].second;
						}
						virtualFaceData.quality = totalQuality2 / validIndices.size();
						virtualFaceData.color = totalColor2 / validIndices.size();
					}
				}
				// virtualFaceData.bInvalidFacesRelative = (invalidCount > 1);
				// virtualFaceData.bInvalidFacesRelative = (invalidCount > processedFaces * 2 / 3);
			}
			ASSERT(!virtualFaceDatas.empty());
		}
		virtualFacesDatas.emplace_back(std::move(virtualFaceDatas));
		virtualFaces.emplace_back(std::move(virtualFace));
	} while (!remainingFaces.empty());
	return true;
}
/*
void MeshTexture::CreateVirtualFaces6(FaceDataViewArr& facesDatas, FaceDataViewArr& virtualFacesDatas, VirtualFaceIdxsArr& virtualFaces, unsigned minCommonCameras, float thMaxNormalDeviation) const
{
float thMaxColorDeviation = 0.000001f;
@@ -5293,7 +6116,8 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT
// CreateVirtualFaces(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras);
// CreateVirtualFaces3(facesDatas, virtualFacesDatas, virtualFaces, minCommonCameras);
// CreateVirtualFaces4(facesDatas, virtualFacesDatas, virtualFaces, mapFaceToVirtualFace, minCommonCameras);
CreateVirtualFaces6(facesDatas, virtualFacesDatas, virtualFaces, isVirtualFace, minCommonCameras);
// CreateVirtualFaces6(facesDatas, virtualFacesDatas, virtualFaces, isVirtualFace, minCommonCameras);
CreateVirtualFaces7(facesDatas, virtualFacesDatas, virtualFaces, isVirtualFace, minCommonCameras);
size_t controlCounter(0);
FOREACH(idxVF, virtualFaces) {
@@ -5438,7 +6262,9 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT
if (numViews <= minSingleView) {
// if (true) {
std::vector<std::pair<float, IIndex>> sortedViews;
std::vector<std::pair<float, Color>> sortedViews2;
sortedViews.reserve(faceDatas.size());
sortedViews2.reserve(faceDatas.size());
for (const FaceData& fd : faceDatas) {
if (fd.bInvalidFacesRelative)
@@ -5450,12 +6276,15 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT
// const Label label = (Label)fd.idxView + 1;
// inference.SetDataCost(label, f, MaxEnergy);
sortedViews.emplace_back(fd.quality, fd.idxView);
sortedViews2.emplace_back(fd.quality, fd.color);
}
else
{
// if (fd.quality<=999.0)
{
sortedViews.emplace_back(fd.quality, fd.idxView);
sortedViews2.emplace_back(fd.quality, fd.color);
// printf("1fd.quality=%f\n", fd.quality);
}
// else
@@ -5464,6 +6293,8 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT
}
std::sort(sortedViews.begin(), sortedViews.end(),
[](const auto& a, const auto& b) { return a.first > b.first; });
std::sort(sortedViews2.begin(), sortedViews2.end(),
[](const auto& a, const auto& b) { return a.first > b.first; });
// 设置数据成本:最佳视角成本最低,其他按质量排序递增
const float baseCostScale = 0.1f; // 基础成本缩放系数
const float costStep = 0.3f; // 相邻视角成本增量
@@ -5479,7 +6310,26 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT
continue;
}
//*
int nSize = sortedViews2.size();
float totalQuality = 0.0f;
Color totalColor(0,0,0);
for (int n = 0; n < nSize; ++n) {
totalQuality += sortedViews2[n].first;
totalColor += sortedViews2[n].second;
}
const float avgQuality = totalQuality / nSize;
const Color avgColor = totalColor / nSize;
if (sortedViews2.size()<=0)
continue;
// printf("sortedViews2.size=%d\n", sortedViews2.size());
const Color medianColor = ComputeMedianColorAndQuality(sortedViews2).color;
const float medianQuality = ComputeMedianColorAndQuality(sortedViews2).quality;
//*/
for (size_t i = 0; i < sortedViews.size(); ++i) {
const Label label = (Label)sortedViews[i].second + 1;
float cost;
@@ -5499,6 +6349,26 @@ bool MeshTexture::FaceViewSelection3( unsigned minCommonCameras, float fOutlierT
// continue;
// 过滤不可见的面
//*
const Color& viewColor = sortedViews2[i].second;
// float colorDistance = cv::norm(avgColor - viewColor);
// if (colorDistance>0.0001)
// if (nSize>0)
// printf("colorDistance=%f, nSize=%d, %f, %f, %f, %f, %f, %f\n", colorDistance, nSize,
// avgColor.x, avgColor.y, avgColor.z, viewColor.x, viewColor.y, viewColor.z);
// if (colorDistance>0.000)
// continue;
float colorDistance = cv::norm(viewColor - medianColor);
if (colorDistance>0.0000)
printf("colorDistance=%f, nSize=%d, i=%d, %f, %f, %f, %f, %f, %f\n", colorDistance, nSize, i,
medianColor.x, medianColor.y, medianColor.z, viewColor.x, viewColor.y, viewColor.z);
// float luminanceDistance = std::abs(viewLuminance - medianLuminance);
if (colorDistance>10.0000)
continue;
//*/
if (i == 0) {
// 最佳视角
// cost = (1.f - sortedViews[i].first / normQuality) * MaxEnergy * baseCostScale;

7
libs/MVS/mask_face_occlusion.py

@@ -5,7 +5,7 @@ from scipy.spatial.transform import Rotation
import sys
sys.path.append("/home/algo/Documents/mask_face_occlusion/")
from colmap_loader import read_cameras_text, read_images_text, read_int_text, write_int_text, read_indices_from_file
from utils.get_pose_matrix import get_w2c
from get_pose_matrix import get_w2c
import argparse
import matplotlib.pyplot as plt
import collections
@@ -1014,8 +1014,9 @@ class ModelProcessor:
}
img_name = img.name[:-4]
print("img_name=", img_name, n)
# if (img_name!="72_2" and img_name!="82_2" and img_name!="83_2"): # 82_2 72_2
# if (img_name!="74_8"):
# if (img_name!="73_8" and img_name!="52_8" and img_name!="62_8"):
# if (img_name!="52_8" and img_name!="62_8"):
# if (img_name!="52_8"):
# continue
# face_visibility2, face_contour = self._flag_contour(camera_data, None)
# countour_faces_dict[img.name[:-4]] = np.where(face_contour)[0].tolist()

Loading…
Cancel
Save