|
|
|
@ -111,7 +111,13 @@ struct TRWSInference { |
|
|
|
typedef MRFEnergy<TypePotts>::Options MRFOptions; |
|
|
|
typedef MRFEnergy<TypePotts>::Options MRFOptions; |
|
|
|
|
|
|
|
|
|
|
|
CAutoPtr<MRFEnergyType> mrf; |
|
|
|
CAutoPtr<MRFEnergyType> mrf; |
|
|
|
CAutoPtrArr<MRFEnergyType::NodeId> nodes; |
|
|
|
CAutoPtrArr<MRFEnerg |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
void MeshTexture::ApplyColorConsistencyOptimization( |
|
|
|
|
|
|
|
Image8U3& texture, |
|
|
|
|
|
|
|
const std::vector<std::vector<PixelSample>>& pixelSamples, |
|
|
|
|
|
|
|
int textureSize, |
|
|
|
|
|
|
|
PyType::NodeId> nodes; |
|
|
|
|
|
|
|
|
|
|
|
inline TRWSInference() {} |
|
|
|
inline TRWSInference() {} |
|
|
|
void Init(NodeID nNodes, LabelID nLabels) { |
|
|
|
void Init(NodeID nNodes, LabelID nLabels) { |
|
|
|
@ -186,6 +192,14 @@ struct PatchQualityInfo { |
|
|
|
std::vector<float> faceQualities; |
|
|
|
std::vector<float> faceQualities; |
|
|
|
}; |
|
|
|
}; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// PixelSample is defined outside the function (at file scope) so it can be referenced by the method declarations below.
|
|
|
|
|
|
|
|
// One color observation of a texture texel taken from a single input view;
// several samples per texel are collected and later fused.
struct PixelSample {
cv::Vec3f color; // sampled color (BGR channel order)
float weight; // sample weight
IIndex viewIdx; // index of the source view
};
|
|
|
|
|
|
|
|
|
|
|
std::vector<PatchQualityInfo> patchQualityInfos; |
|
|
|
std::vector<PatchQualityInfo> patchQualityInfos; |
|
|
|
|
|
|
|
|
|
|
|
struct MeshTexture { |
|
|
|
struct MeshTexture { |
|
|
|
@ -593,6 +607,12 @@ public: |
|
|
|
unsigned nTextureSizeMultiple, |
|
|
|
unsigned nTextureSizeMultiple, |
|
|
|
Pixel8U colEmpty, |
|
|
|
Pixel8U colEmpty, |
|
|
|
float fSharpnessWeight); |
|
|
|
float fSharpnessWeight); |
|
|
|
|
|
|
|
void FillTextureGaps(Image8U3& texture, Pixel8U colEmpty); |
|
|
|
|
|
|
|
void ApplyColorConsistencyOptimization( |
|
|
|
|
|
|
|
Image8U3& texture, |
|
|
|
|
|
|
|
const std::vector<std::vector<PixelSample>>& pixelSamples, |
|
|
|
|
|
|
|
int textureSize, |
|
|
|
|
|
|
|
Pixel8U colEmpty); |
|
|
|
void ApplyGlobalColorCorrection(Image8U3& texture, Pixel8U colEmpty, float strength); |
|
|
|
void ApplyGlobalColorCorrection(Image8U3& texture, Pixel8U colEmpty, float strength); |
|
|
|
void ApplySoftSharpening(Image8U3& texture, float strength, Pixel8U colEmpty); |
|
|
|
void ApplySoftSharpening(Image8U3& texture, float strength, Pixel8U colEmpty); |
|
|
|
Mesh::Image8U3Arr GenerateTextureAtlasFromUV( |
|
|
|
Mesh::Image8U3Arr GenerateTextureAtlasFromUV( |
|
|
|
@ -13703,6 +13723,129 @@ bool MeshTexture::TextureWithExistingUV( |
|
|
|
return false; |
|
|
|
return false; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Fill empty (background-colored) regions of the texture atlas using
// Navier-Stokes image inpainting so gaps blend with the surrounding texels.
//
// texture  : texture atlas, modified in place (only empty texels are written)
// colEmpty : the background color marking unfilled texels
void MeshTexture::FillTextureGaps(Image8U3& texture, Pixel8U colEmpty) {
	if (texture.empty())
		return;

	cv::Mat textureMat = (cv::Mat&)texture;

	// Build the inpainting mask. cv::inpaint expects NON-ZERO mask pixels to
	// mark the region that must be reconstructed, i.e. the EMPTY texels.
	// (The original code set the mask on the VALID texels instead, which made
	// inpaint repaint the textured area and leave the gaps untouched.)
	cv::Mat mask = cv::Mat::zeros(texture.rows, texture.cols, CV_8UC1);
	for (int y = 0; y < texture.rows; ++y) {
		for (int x = 0; x < texture.cols; ++x) {
			if (texture(y, x) == colEmpty)
				mask.at<uchar>(y, x) = 255;
		}
	}

	// Reconstruct the masked (empty) region from its valid neighborhood.
	cv::Mat inpainted;
	cv::inpaint(textureMat, mask, inpainted, 3, cv::INPAINT_NS);

	// Copy the reconstructed colors back, touching only the empty texels.
	for (int y = 0; y < texture.rows; ++y) {
		for (int x = 0; x < texture.cols; ++x) {
			if (texture(y, x) == colEmpty) {
				const cv::Vec3b color = inpainted.at<cv::Vec3b>(y, x);
				Pixel8U pixel;
				pixel.b = color[0];
				pixel.g = color[1];
				pixel.r = color[2];
				texture(y, x) = pixel;
			}
		}
	}
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Smooth out local color inconsistencies in the texture atlas: for every
// texel that has at least one view sample, pull its color toward the mean of
// its valid 3x3 neighborhood when it deviates strongly from that mean.
//
// texture      : texture atlas, modified in place
// pixelSamples : per-texel view samples, indexed as y*textureSize + x
// textureSize  : side length of the sample grid (assumed to match the
//                texture dimensions -- TODO confirm at the call site)
// colEmpty     : background color; empty texels never contribute to the mean
//
// NOTE(review): texels are updated in place while (possibly already adjusted)
// neighbors are read, so the result depends on scan order; this matches the
// original behavior.
void MeshTexture::ApplyColorConsistencyOptimization(
	Image8U3& texture,
	const std::vector<std::vector<PixelSample>>& pixelSamples,
	int textureSize,
	Pixel8U colEmpty)
{
	if (pixelSamples.empty())
		return;

	// 3x3 neighborhood.
	const int kernelSize = 3;
	const int halfKernel = kernelSize / 2;

	for (int y = halfKernel; y < texture.rows - halfKernel; ++y) {
		for (int x = halfKernel; x < texture.cols - halfKernel; ++x) {
			const int pixelIdx = y * textureSize + x;
			// Guard against a texture larger than the sample grid.
			if (pixelIdx < 0 || pixelIdx >= (int)pixelSamples.size())
				continue;
			// Skip texels that were never sampled from any view.
			if (pixelSamples[pixelIdx].empty())
				continue;

			// Collect the colors (BGR) of sampled, non-empty neighbors.
			std::vector<cv::Vec3f> neighborColors;
			for (int dy = -halfKernel; dy <= halfKernel; ++dy) {
				for (int dx = -halfKernel; dx <= halfKernel; ++dx) {
					const int nx = x + dx;
					const int ny = y + dy;
					const int neighborIdx = ny * textureSize + nx;
					if (neighborIdx < 0 || neighborIdx >= (int)pixelSamples.size())
						continue;
					if (pixelSamples[neighborIdx].empty())
						continue;
					const Pixel8U pixel = texture(ny, nx);
					if (pixel != colEmpty)
						neighborColors.push_back(cv::Vec3f(pixel.b, pixel.g, pixel.r));
				}
			}

			// Require enough neighborhood evidence before adjusting.
			if (neighborColors.size() < 3)
				continue;

			// Mean neighborhood color.
			cv::Vec3f mean(0, 0, 0);
			for (const auto& color : neighborColors)
				mean += color;
			mean /= (float)neighborColors.size();

			// Current texel color (BGR).
			const Pixel8U currentPixel = texture(y, x);
			const cv::Vec3f currentColor(currentPixel.b, currentPixel.g, currentPixel.r);

			// Blend toward the neighborhood mean only when the deviation
			// exceeds the threshold, to avoid washing out genuine detail.
			const float alpha = 0.3f; // blending strength
			cv::Vec3f adjustedColor = currentColor;
			const cv::Vec3f diff = currentColor - mean;
			const float diffMag = std::sqrt(diff[0]*diff[0] + diff[1]*diff[1] + diff[2]*diff[2]);
			if (diffMag > 10.0f) // deviation threshold (8-bit color units)
				adjustedColor = currentColor * (1.0f - alpha) + mean * alpha;

			// Write back, clamped to the 8-bit range.
			Pixel8U newPixel;
			newPixel.r = cv::saturate_cast<uchar>(adjustedColor[2]);
			newPixel.g = cv::saturate_cast<uchar>(adjustedColor[1]);
			newPixel.b = cv::saturate_cast<uchar>(adjustedColor[0]);
			texture(y, x) = newPixel;
		}
	}
}
|
|
|
|
|
|
|
|
|
|
|
Mesh::Image8U3Arr MeshTexture::GenerateTextureAtlasWith3DBridge( |
|
|
|
Mesh::Image8U3Arr MeshTexture::GenerateTextureAtlasWith3DBridge( |
|
|
|
const LabelArr& faceLabels, |
|
|
|
const LabelArr& faceLabels, |
|
|
|
const IIndexArr& views, |
|
|
|
const IIndexArr& views, |
|
|
|
@ -13713,7 +13856,10 @@ Mesh::Image8U3Arr MeshTexture::GenerateTextureAtlasWith3DBridge( |
|
|
|
Pixel8U colEmpty, |
|
|
|
Pixel8U colEmpty, |
|
|
|
float fSharpnessWeight) |
|
|
|
float fSharpnessWeight) |
|
|
|
{ |
|
|
|
{ |
|
|
|
DEBUG_EXTRA("GenerateTextureAtlasWith3DBridge - 使用3D几何坐标作为桥梁,修复白平衡问题"); |
|
|
|
DEBUG_EXTRA("GenerateTextureAtlasWith3DBridge - 使用3D几何坐标和多视图融合"); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 定义INVALID_INDEX常量
|
|
|
|
|
|
|
|
const IIndex INVALID_INDEX = (IIndex)-1; |
|
|
|
|
|
|
|
|
|
|
|
// 1. 分析外部UV布局
|
|
|
|
// 1. 分析外部UV布局
|
|
|
|
AABB2f uvBounds(true); |
|
|
|
AABB2f uvBounds(true); |
|
|
|
@ -13746,9 +13892,6 @@ Mesh::Image8U3Arr MeshTexture::GenerateTextureAtlasWith3DBridge( |
|
|
|
|
|
|
|
|
|
|
|
// 使用统一的背景色设置
|
|
|
|
// 使用统一的背景色设置
|
|
|
|
DEBUG_EXTRA("设置背景色: RGB(%d,%d,%d)", colEmpty.r, colEmpty.g, colEmpty.b); |
|
|
|
DEBUG_EXTRA("设置背景色: RGB(%d,%d,%d)", colEmpty.r, colEmpty.g, colEmpty.b); |
|
|
|
|
|
|
|
|
|
|
|
// 注意:Image8U3的setTo使用cv::Scalar,是BGR顺序
|
|
|
|
|
|
|
|
// 但colEmpty是RGB顺序,需要转换
|
|
|
|
|
|
|
|
cv::Scalar cvEmpty(colEmpty.b, colEmpty.g, colEmpty.r); |
|
|
|
cv::Scalar cvEmpty(colEmpty.b, colEmpty.g, colEmpty.r); |
|
|
|
textureAtlas.setTo(cvEmpty); |
|
|
|
textureAtlas.setTo(cvEmpty); |
|
|
|
|
|
|
|
|
|
|
|
@ -13757,38 +13900,81 @@ Mesh::Image8U3Arr MeshTexture::GenerateTextureAtlasWith3DBridge( |
|
|
|
uvBounds.ptMin.x(), uvBounds.ptMin.y(), |
|
|
|
uvBounds.ptMin.x(), uvBounds.ptMin.y(), |
|
|
|
uvBounds.ptMax.x(), uvBounds.ptMax.y()); |
|
|
|
uvBounds.ptMax.x(), uvBounds.ptMax.y()); |
|
|
|
|
|
|
|
|
|
|
|
// 2. 为每个视图创建颜色统计
|
|
|
|
std::vector<std::vector<PixelSample>> pixelSamples(textureSize * textureSize); |
|
|
|
std::vector<cv::Vec3d> viewColorSums(images.size(), cv::Vec3d(0, 0, 0)); |
|
|
|
|
|
|
|
std::vector<int> viewPixelCounts(images.size(), 0); |
|
|
|
|
|
|
|
std::vector<std::vector<cv::Vec3b>> viewColorSamples(images.size()); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 3. 第一次遍历:收集每个视图的颜色统计
|
|
|
|
// 3. 第一次遍历:收集所有视图的采样
|
|
|
|
DEBUG_EXTRA("第一次遍历:收集视图颜色统计"); |
|
|
|
DEBUG_EXTRA("第一次遍历:收集多视图采样"); |
|
|
|
|
|
|
|
int totalSamples = 0; |
|
|
|
|
|
|
|
|
|
|
|
#ifdef _USE_OPENMP |
|
|
|
#ifdef _USE_OPENMP |
|
|
|
#pragma omp parallel for schedule(dynamic) |
|
|
|
#pragma omp parallel for schedule(dynamic) reduction(+:totalSamples) |
|
|
|
#endif |
|
|
|
#endif |
|
|
|
for (int_t idxFace = 0; idxFace < (int_t)scene.mesh.faces.size(); ++idxFace) { |
|
|
|
for (int_t idxFace = 0; idxFace < (int_t)scene.mesh.faces.size(); ++idxFace) { |
|
|
|
const FIndex faceID = (FIndex)idxFace; |
|
|
|
const FIndex faceID = (FIndex)idxFace; |
|
|
|
const Label label = faceLabels[faceID]; |
|
|
|
const Label label = faceLabels[faceID]; |
|
|
|
|
|
|
|
|
|
|
|
if (label == 0) continue; |
|
|
|
if (label == 0) continue; |
|
|
|
const IIndex idxView = label - 1; |
|
|
|
const IIndex primaryView = label - 1; |
|
|
|
if (idxView >= images.size()) continue; |
|
|
|
if (primaryView >= images.size()) continue; |
|
|
|
|
|
|
|
|
|
|
|
const Image& sourceImage = images[idxView]; |
|
|
|
|
|
|
|
const TexCoord* meshUVs = &scene.mesh.faceTexcoords[faceID * 3]; |
|
|
|
const TexCoord* meshUVs = &scene.mesh.faceTexcoords[faceID * 3]; |
|
|
|
const Face& face = scene.mesh.faces[faceID]; |
|
|
|
const Face& face = scene.mesh.faces[faceID]; |
|
|
|
|
|
|
|
const Image& sourceImage = images[primaryView]; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 计算面法线
|
|
|
|
|
|
|
|
Vertex v0 = vertices[face[1]] - vertices[face[0]]; |
|
|
|
|
|
|
|
Vertex v1 = vertices[face[2]] - vertices[face[0]]; |
|
|
|
|
|
|
|
Vertex crossProduct = v0.cross(v1); |
|
|
|
|
|
|
|
double normVal = cv::norm(crossProduct); |
|
|
|
|
|
|
|
Vertex normal(0, 0, 1); // 默认法向量
|
|
|
|
|
|
|
|
if (normVal > 0) { |
|
|
|
|
|
|
|
normal = crossProduct / (float)normVal; // 归一化
|
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
// 在面的中心采样几个点
|
|
|
|
// 计算面的UV边界
|
|
|
|
for (int i = 0; i < 3; ++i) { // 采样三个顶点
|
|
|
|
AABB2f faceBounds(true); |
|
|
|
const Vertex& worldPoint = vertices[face[i]]; |
|
|
|
for (int i = 0; i < 3; ++i) { |
|
|
|
|
|
|
|
faceBounds.InsertFull(meshUVs[i]); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
int startX = (int)(faceBounds.ptMin.x() * textureSize); |
|
|
|
|
|
|
|
int startY = (int)(faceBounds.ptMin.y() * textureSize); |
|
|
|
|
|
|
|
int endX = (int)(faceBounds.ptMax.x() * textureSize); |
|
|
|
|
|
|
|
int endY = (int)(faceBounds.ptMax.y() * textureSize); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
startX = std::max(0, std::min(startX, textureSize - 1)); |
|
|
|
|
|
|
|
startY = std::max(0, std::min(startY, textureSize - 1)); |
|
|
|
|
|
|
|
endX = std::max(0, std::min(endX, textureSize - 1)); |
|
|
|
|
|
|
|
endY = std::max(0, std::min(endY, textureSize - 1)); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if (startX >= endX || startY >= endY) continue; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 采样密度
|
|
|
|
|
|
|
|
const int sampleStep = 1; // 每个像素都采样
|
|
|
|
|
|
|
|
int faceSamples = 0; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
for (int y = startY; y <= endY; y += sampleStep) { |
|
|
|
|
|
|
|
for (int x = startX; x <= endX; x += sampleStep) { |
|
|
|
|
|
|
|
const Point2f texCoord((x + 0.5f) / textureSize, (y + 0.5f) / textureSize); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 计算重心坐标
|
|
|
|
|
|
|
|
Point3f barycentric; |
|
|
|
|
|
|
|
if (!PointInTriangle(texCoord, meshUVs[0], meshUVs[1], meshUVs[2], barycentric)) { |
|
|
|
|
|
|
|
continue; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 计算3D点
|
|
|
|
|
|
|
|
const Vertex worldPoint = |
|
|
|
|
|
|
|
vertices[face[0]] * barycentric.x + |
|
|
|
|
|
|
|
vertices[face[1]] * barycentric.y + |
|
|
|
|
|
|
|
vertices[face[2]] * barycentric.z; |
|
|
|
|
|
|
|
|
|
|
|
// 检查3D点是否在相机前方
|
|
|
|
// 检查3D点是否在相机前方
|
|
|
|
if (!sourceImage.camera.IsInFront(worldPoint)) { |
|
|
|
if (!sourceImage.camera.IsInFront(worldPoint)) { |
|
|
|
continue; |
|
|
|
continue; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 投影到图像
|
|
|
|
Point2f imgPoint = sourceImage.camera.ProjectPointP(worldPoint); |
|
|
|
Point2f imgPoint = sourceImage.camera.ProjectPointP(worldPoint); |
|
|
|
|
|
|
|
|
|
|
|
// 确保在图像范围内
|
|
|
|
// 确保在图像范围内
|
|
|
|
@ -13800,77 +13986,179 @@ Mesh::Image8U3Arr MeshTexture::GenerateTextureAtlasWith3DBridge( |
|
|
|
// 采样图像
|
|
|
|
// 采样图像
|
|
|
|
Pixel8U sampledColor = SampleImageBilinear(sourceImage.image, imgPoint); |
|
|
|
Pixel8U sampledColor = SampleImageBilinear(sourceImage.image, imgPoint); |
|
|
|
|
|
|
|
|
|
|
|
// 存储采样颜色
|
|
|
|
// 计算权重
|
|
|
|
|
|
|
|
float weight = 1.0f; // 默认权重
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 计算第一个顶点在图像上的投影(用于分辨率计算)
|
|
|
|
|
|
|
|
Point2f proj0 = sourceImage.camera.ProjectPointP(vertices[face[0]]); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 计算到相机中心的距离
|
|
|
|
|
|
|
|
Vertex cameraPos(sourceImage.camera.C.x, sourceImage.camera.C.y, sourceImage.camera.C.z); |
|
|
|
|
|
|
|
float dist = cv::norm(cameraPos - worldPoint); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 计算视角与法线夹角
|
|
|
|
|
|
|
|
Vertex viewDir = cameraPos - worldPoint; |
|
|
|
|
|
|
|
float viewDirNorm = cv::norm(viewDir); |
|
|
|
|
|
|
|
if (viewDirNorm > 0) { |
|
|
|
|
|
|
|
viewDir = viewDir / viewDirNorm; |
|
|
|
|
|
|
|
float viewAngle = std::abs(viewDir.dot(normal)); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 计算分辨率
|
|
|
|
|
|
|
|
float resolution = 1.0f; |
|
|
|
|
|
|
|
double projDist = cv::norm(imgPoint - proj0); |
|
|
|
|
|
|
|
if (projDist > 1e-6) { |
|
|
|
|
|
|
|
resolution = 1.0f / (float)projDist; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 综合权重 = 视角质量 * 分辨率
|
|
|
|
|
|
|
|
weight = viewAngle * resolution; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 添加距离衰减
|
|
|
|
|
|
|
|
float distFactor = std::exp(-dist * 0.001f); |
|
|
|
|
|
|
|
weight *= distFactor; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 限制权重范围
|
|
|
|
|
|
|
|
weight = std::max(0.5f, std::min(2.0f, weight)); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 存储采样
|
|
|
|
|
|
|
|
int pixelIdx = y * textureSize + x; |
|
|
|
|
|
|
|
PixelSample sample; |
|
|
|
|
|
|
|
sample.color = cv::Vec3f(sampledColor.b, sampledColor.g, sampledColor.r); // 转换为BGR
|
|
|
|
|
|
|
|
sample.weight = weight; |
|
|
|
|
|
|
|
sample.viewIdx = primaryView; |
|
|
|
|
|
|
|
|
|
|
|
#ifdef _USE_OPENMP |
|
|
|
#ifdef _USE_OPENMP |
|
|
|
#pragma omp critical |
|
|
|
#pragma omp critical |
|
|
|
#endif |
|
|
|
#endif |
|
|
|
{ |
|
|
|
{ |
|
|
|
viewColorSamples[idxView].push_back(cv::Vec3b(sampledColor.b, sampledColor.g, sampledColor.r)); |
|
|
|
pixelSamples[pixelIdx].push_back(sample); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
faceSamples++; |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
totalSamples += faceSamples; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
// 4. 计算每个视图的颜色调整因子
|
|
|
|
DEBUG_EXTRA("采样完成: 总采样点 %d", totalSamples); |
|
|
|
std::vector<cv::Vec3d> viewColorAdjustments(images.size(), cv::Vec3d(1.0, 1.0, 1.0)); |
|
|
|
|
|
|
|
cv::Vec3d globalMean(0, 0, 0); |
|
|
|
|
|
|
|
int globalSampleCount = 0; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
for (size_t i = 0; i < images.size(); ++i) { |
|
|
|
// 4. 第二次遍历:从其他视图补充采样
|
|
|
|
if (viewColorSamples[i].empty()) continue; |
|
|
|
DEBUG_EXTRA("第二次遍历:从其他视图补充采样"); |
|
|
|
|
|
|
|
|
|
|
|
cv::Vec3d sum(0, 0, 0); |
|
|
|
// 找出每个面的其他可见视图
|
|
|
|
for (const auto& pixel : viewColorSamples[i]) { |
|
|
|
std::vector<std::vector<IIndex>> faceVisibleViews(scene.mesh.faces.size()); |
|
|
|
sum[0] += pixel[0]; // B
|
|
|
|
|
|
|
|
sum[1] += pixel[1]; // G
|
|
|
|
#ifdef _USE_OPENMP |
|
|
|
sum[2] += pixel[2]; // R
|
|
|
|
#pragma omp parallel for schedule(dynamic) |
|
|
|
|
|
|
|
#endif |
|
|
|
|
|
|
|
for (int_t idxFace = 0; idxFace < (int_t)scene.mesh.faces.size(); ++idxFace) { |
|
|
|
|
|
|
|
const FIndex faceID = (FIndex)idxFace; |
|
|
|
|
|
|
|
const Face& face = scene.mesh.faces[faceID]; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 计算面法线
|
|
|
|
|
|
|
|
Vertex v0 = vertices[face[1]] - vertices[face[0]]; |
|
|
|
|
|
|
|
Vertex v1 = vertices[face[2]] - vertices[face[0]]; |
|
|
|
|
|
|
|
Vertex crossProduct = v0.cross(v1); |
|
|
|
|
|
|
|
double normVal = cv::norm(crossProduct); |
|
|
|
|
|
|
|
Vertex normal(0, 0, 1); // 默认法向量
|
|
|
|
|
|
|
|
if (normVal > 0) { |
|
|
|
|
|
|
|
normal = crossProduct / (float)normVal; // 归一化
|
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 计算面中心
|
|
|
|
|
|
|
|
Vertex faceCenter(0, 0, 0); |
|
|
|
|
|
|
|
for (int v = 0; v < 3; ++v) { |
|
|
|
|
|
|
|
faceCenter += vertices[face[v]]; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
faceCenter /= 3.0f; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 检查所有视图
|
|
|
|
|
|
|
|
std::vector<std::pair<float, IIndex>> viewScores; |
|
|
|
|
|
|
|
for (IIndex i = 0; i < images.size(); ++i) { |
|
|
|
|
|
|
|
const Image& img = images[i]; |
|
|
|
|
|
|
|
|
|
|
|
cv::Vec3d mean = sum / (double)viewColorSamples[i].size(); |
|
|
|
// 检查可见性
|
|
|
|
globalMean += sum; |
|
|
|
if (!img.camera.IsInFront(faceCenter)) continue; |
|
|
|
globalSampleCount += viewColorSamples[i].size(); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
DEBUG_EXTRA("视图 %zu: 平均颜色 B=%.1f, G=%.1f, R=%.1f, 样本数=%zu", |
|
|
|
// 计算投影点
|
|
|
|
i, mean[0], mean[1], mean[2], viewColorSamples[i].size()); |
|
|
|
Point2f imgPoint = img.camera.ProjectPointP(faceCenter); |
|
|
|
|
|
|
|
if (imgPoint.x < 0 || imgPoint.x >= img.image.cols || |
|
|
|
|
|
|
|
imgPoint.y < 0 || imgPoint.y >= img.image.rows) { |
|
|
|
|
|
|
|
continue; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
// 计算全局平均颜色
|
|
|
|
// 计算视角方向
|
|
|
|
if (globalSampleCount > 0) { |
|
|
|
Vertex cameraPos(img.camera.C.x, img.camera.C.y, img.camera.C.z); |
|
|
|
globalMean /= globalSampleCount; |
|
|
|
Vertex viewDirVec = cameraPos - faceCenter; |
|
|
|
DEBUG_EXTRA("全局平均颜色: B=%.1f, G=%.1f, R=%.1f", |
|
|
|
float viewDirNorm = cv::norm(viewDirVec); |
|
|
|
globalMean[0], globalMean[1], globalMean[2]); |
|
|
|
|
|
|
|
|
|
|
|
if (viewDirNorm <= 1e-6) continue; // 避免除以零
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Vertex viewDir = viewDirVec / viewDirNorm; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 计算视角质量(法线与视角方向的夹角)
|
|
|
|
|
|
|
|
float viewAngle = std::abs(viewDir.dot(normal)); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 计算分辨率 - 计算两个顶点投影点之间的欧氏距离
|
|
|
|
|
|
|
|
Point2f proj0 = img.camera.ProjectPointP(vertices[face[0]]); |
|
|
|
|
|
|
|
double projDist = cv::norm(imgPoint - proj0); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 如果两个投影点太近,则跳过
|
|
|
|
|
|
|
|
if (projDist < 1e-6) continue; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
float resolution = 1.0f / (float)projDist; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 综合得分
|
|
|
|
|
|
|
|
float score = viewAngle * resolution; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if (score > 0.1f) { // 阈值
|
|
|
|
|
|
|
|
viewScores.push_back({score, i}); |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
// 5. 采样纹理
|
|
|
|
// 按得分排序
|
|
|
|
DEBUG_EXTRA("第二次遍历:采样纹理并应用颜色校正"); |
|
|
|
std::sort(viewScores.begin(), viewScores.end(), |
|
|
|
|
|
|
|
[](const auto& a, const auto& b) { return a.first > b.first; }); |
|
|
|
|
|
|
|
|
|
|
|
cv::Mat1f weightAccum(textureSize, textureSize, 0.0f); |
|
|
|
// 选择前4个最佳视图
|
|
|
|
cv::Mat3f colorAccum(textureSize, textureSize, cv::Vec3f(0, 0, 0)); |
|
|
|
faceVisibleViews[idxFace].push_back(faceLabels[idxFace] - 1); // 主视图
|
|
|
|
|
|
|
|
for (size_t i = 0; i < std::min(viewScores.size(), (size_t)3); ++i) { |
|
|
|
|
|
|
|
if (viewScores[i].second != faceLabels[idxFace] - 1) { |
|
|
|
|
|
|
|
faceVisibleViews[idxFace].push_back(viewScores[i].second); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
int processedFaces = 0; |
|
|
|
// 5. 从其他视图采样
|
|
|
|
int sampledPixels = 0; |
|
|
|
int additionalSamples = 0; |
|
|
|
int failedFaces = 0; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#ifdef _USE_OPENMP |
|
|
|
#ifdef _USE_OPENMP |
|
|
|
#pragma omp parallel for schedule(dynamic) reduction(+:processedFaces, sampledPixels, failedFaces) |
|
|
|
#pragma omp parallel for schedule(dynamic) reduction(+:additionalSamples) |
|
|
|
#endif |
|
|
|
#endif |
|
|
|
for (int_t idxFace = 0; idxFace < (int_t)scene.mesh.faces.size(); ++idxFace) { |
|
|
|
for (int_t idxFace = 0; idxFace < (int_t)scene.mesh.faces.size(); ++idxFace) { |
|
|
|
const FIndex faceID = (FIndex)idxFace; |
|
|
|
const FIndex faceID = (FIndex)idxFace; |
|
|
|
const Label label = faceLabels[faceID]; |
|
|
|
const Label label = faceLabels[faceID]; |
|
|
|
|
|
|
|
|
|
|
|
if (label == 0) { |
|
|
|
if (label == 0) continue; |
|
|
|
failedFaces++; |
|
|
|
|
|
|
|
continue; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
const IIndex idxView = label - 1; |
|
|
|
|
|
|
|
if (idxView >= images.size()) { |
|
|
|
|
|
|
|
failedFaces++; |
|
|
|
|
|
|
|
continue; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
const TexCoord* meshUVs = &scene.mesh.faceTexcoords[faceID * 3]; |
|
|
|
const TexCoord* meshUVs = &scene.mesh.faceTexcoords[faceID * 3]; |
|
|
|
const Face& face = scene.mesh.faces[faceID]; |
|
|
|
const Face& face = scene.mesh.faces[faceID]; |
|
|
|
const Image& sourceImage = images[idxView]; |
|
|
|
|
|
|
|
|
|
|
|
// 计算面法线
|
|
|
|
|
|
|
|
Vertex v0 = vertices[face[1]] - vertices[face[0]]; |
|
|
|
|
|
|
|
Vertex v1 = vertices[face[2]] - vertices[face[0]]; |
|
|
|
|
|
|
|
Vertex crossProduct = v0.cross(v1); |
|
|
|
|
|
|
|
double normVal = cv::norm(crossProduct); |
|
|
|
|
|
|
|
Vertex normal(0, 0, 1); // 默认法向量
|
|
|
|
|
|
|
|
if (normVal > 0) { |
|
|
|
|
|
|
|
normal = crossProduct / (float)normVal; // 归一化
|
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 获取可见视图
|
|
|
|
|
|
|
|
const auto& visibleViews = faceVisibleViews[idxFace]; |
|
|
|
|
|
|
|
if (visibleViews.size() <= 1) continue; // 只有一个视图
|
|
|
|
|
|
|
|
|
|
|
|
// 计算面的UV边界
|
|
|
|
// 计算面的UV边界
|
|
|
|
AABB2f faceBounds(true); |
|
|
|
AABB2f faceBounds(true); |
|
|
|
@ -13888,14 +14176,9 @@ Mesh::Image8U3Arr MeshTexture::GenerateTextureAtlasWith3DBridge( |
|
|
|
endX = std::max(0, std::min(endX, textureSize - 1)); |
|
|
|
endX = std::max(0, std::min(endX, textureSize - 1)); |
|
|
|
endY = std::max(0, std::min(endY, textureSize - 1)); |
|
|
|
endY = std::max(0, std::min(endY, textureSize - 1)); |
|
|
|
|
|
|
|
|
|
|
|
if (startX >= endX || startY >= endY) { |
|
|
|
if (startX >= endX || startY >= endY) continue; |
|
|
|
failedFaces++; |
|
|
|
|
|
|
|
continue; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
int faceSampledPixels = 0; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 采样纹理
|
|
|
|
// 对每个像素,从其他视图采样
|
|
|
|
for (int y = startY; y <= endY; ++y) { |
|
|
|
for (int y = startY; y <= endY; ++y) { |
|
|
|
for (int x = startX; x <= endX; ++x) { |
|
|
|
for (int x = startX; x <= endX; ++x) { |
|
|
|
const Point2f texCoord((x + 0.5f) / textureSize, (y + 0.5f) / textureSize); |
|
|
|
const Point2f texCoord((x + 0.5f) / textureSize, (y + 0.5f) / textureSize); |
|
|
|
@ -13912,15 +14195,20 @@ Mesh::Image8U3Arr MeshTexture::GenerateTextureAtlasWith3DBridge( |
|
|
|
vertices[face[1]] * barycentric.y + |
|
|
|
vertices[face[1]] * barycentric.y + |
|
|
|
vertices[face[2]] * barycentric.z; |
|
|
|
vertices[face[2]] * barycentric.z; |
|
|
|
|
|
|
|
|
|
|
|
// 检查3D点是否在相机前方
|
|
|
|
int pixelIdx = y * textureSize + x; |
|
|
|
if (!sourceImage.camera.IsInFront(worldPoint)) { |
|
|
|
|
|
|
|
continue; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 从原始图像采样
|
|
|
|
// 从其他视图采样
|
|
|
|
Point2f imgPoint = sourceImage.camera.ProjectPointP(worldPoint); |
|
|
|
for (size_t i = 1; i < visibleViews.size(); ++i) { // 从第2个视图开始
|
|
|
|
|
|
|
|
IIndex viewIdx = visibleViews[i]; |
|
|
|
|
|
|
|
if (viewIdx >= images.size()) continue; |
|
|
|
|
|
|
|
|
|
|
|
// 确保在图像范围内
|
|
|
|
const Image& sourceImage = images[viewIdx]; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 检查可见性
|
|
|
|
|
|
|
|
if (!sourceImage.camera.IsInFront(worldPoint)) continue; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 投影
|
|
|
|
|
|
|
|
Point2f imgPoint = sourceImage.camera.ProjectPointP(worldPoint); |
|
|
|
if (imgPoint.x < 0 || imgPoint.x >= sourceImage.image.cols || |
|
|
|
if (imgPoint.x < 0 || imgPoint.x >= sourceImage.image.cols || |
|
|
|
imgPoint.y < 0 || imgPoint.y >= sourceImage.image.rows) { |
|
|
|
imgPoint.y < 0 || imgPoint.y >= sourceImage.image.rows) { |
|
|
|
continue; |
|
|
|
continue; |
|
|
|
@ -13929,137 +14217,248 @@ Mesh::Image8U3Arr MeshTexture::GenerateTextureAtlasWith3DBridge( |
|
|
|
// 采样图像
|
|
|
|
// 采样图像
|
|
|
|
Pixel8U sampledColor = SampleImageBilinear(sourceImage.image, imgPoint); |
|
|
|
Pixel8U sampledColor = SampleImageBilinear(sourceImage.image, imgPoint); |
|
|
|
|
|
|
|
|
|
|
|
// 转换为BGR顺序用于颜色累加
|
|
|
|
// 计算权重
|
|
|
|
cv::Vec3f bgrColor( |
|
|
|
float weight = 0.2f; // 默认较低权重
|
|
|
|
sampledColor.b, // B
|
|
|
|
|
|
|
|
sampledColor.g, // G
|
|
|
|
|
|
|
|
sampledColor.r // R
|
|
|
|
|
|
|
|
); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 获取颜色调整因子
|
|
|
|
|
|
|
|
cv::Vec3d adjust(1.0, 1.0, 1.0); |
|
|
|
|
|
|
|
if (globalSampleCount > 0) { |
|
|
|
|
|
|
|
// 计算颜色调整因子
|
|
|
|
|
|
|
|
double avgGray = (globalMean[0] + globalMean[1] + globalMean[2]) / 3.0; |
|
|
|
|
|
|
|
if (avgGray > 0) { |
|
|
|
|
|
|
|
adjust[0] = globalMean[0] / avgGray; // B调整因子
|
|
|
|
|
|
|
|
adjust[1] = globalMean[1] / avgGray; // G调整因子
|
|
|
|
|
|
|
|
adjust[2] = globalMean[2] / avgGray; // R调整因子
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 限制调整幅度
|
|
|
|
// 计算第一个顶点在图像上的投影
|
|
|
|
const double maxAdjust = 1.2; |
|
|
|
Point2f proj0 = sourceImage.camera.ProjectPointP(vertices[face[0]]); |
|
|
|
const double minAdjust = 0.833; // 1/1.2
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
adjust[0] = std::max(minAdjust, std::min(adjust[0], maxAdjust)); |
|
|
|
// 计算投影分辨率
|
|
|
|
adjust[1] = std::max(minAdjust, std::min(adjust[1], maxAdjust)); |
|
|
|
double projDist = cv::norm(imgPoint - proj0); |
|
|
|
adjust[2] = std::max(minAdjust, std::min(adjust[2], maxAdjust)); |
|
|
|
float resolution = 1.0f; |
|
|
|
} |
|
|
|
if (projDist > 1e-6) { |
|
|
|
|
|
|
|
resolution = 1.0f / (float)projDist; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
// 应用颜色调整
|
|
|
|
// 计算视角与法线夹角
|
|
|
|
cv::Vec3f adjustedColor = bgrColor; |
|
|
|
Vertex cameraPos(sourceImage.camera.C.x, sourceImage.camera.C.y, sourceImage.camera.C.z); |
|
|
|
adjustedColor[0] *= adjust[0]; // B通道
|
|
|
|
Vertex viewDir = cameraPos - worldPoint; |
|
|
|
adjustedColor[1] *= adjust[1]; // G通道
|
|
|
|
float viewDirNorm = cv::norm(viewDir); |
|
|
|
adjustedColor[2] *= adjust[2]; // R通道
|
|
|
|
if (viewDirNorm > 0) { |
|
|
|
|
|
|
|
viewDir = viewDir / viewDirNorm; |
|
|
|
|
|
|
|
float viewAngle = std::abs(viewDir.dot(normal)); |
|
|
|
|
|
|
|
|
|
|
|
// 限制颜色范围
|
|
|
|
// 综合权重 = 分辨率 * 视角角度
|
|
|
|
adjustedColor[0] = std::max(0.0f, std::min(255.0f, adjustedColor[0])); |
|
|
|
weight = resolution * viewAngle; |
|
|
|
adjustedColor[1] = std::max(0.0f, std::min(255.0f, adjustedColor[1])); |
|
|
|
weight = std::max(0.1f, std::min(1.0f, weight * 0.5f)); // 限制权重范围
|
|
|
|
adjustedColor[2] = std::max(0.0f, std::min(255.0f, adjustedColor[2])); |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
// 累加颜色和权重
|
|
|
|
// 存储采样
|
|
|
|
float weight = 1.0f; |
|
|
|
PixelSample sample; |
|
|
|
float& w = weightAccum(y, x); |
|
|
|
sample.color = cv::Vec3f(sampledColor.b, sampledColor.g, sampledColor.r); |
|
|
|
cv::Vec3f& c = colorAccum(y, x); |
|
|
|
sample.weight = weight; |
|
|
|
|
|
|
|
sample.viewIdx = viewIdx; |
|
|
|
#ifdef _USE_OPENMP |
|
|
|
|
|
|
|
#pragma omp atomic |
|
|
|
|
|
|
|
#endif |
|
|
|
|
|
|
|
w += weight; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#ifdef _USE_OPENMP |
|
|
|
#ifdef _USE_OPENMP |
|
|
|
#pragma omp critical |
|
|
|
#pragma omp critical |
|
|
|
#endif |
|
|
|
#endif |
|
|
|
{ |
|
|
|
{ |
|
|
|
c[0] += adjustedColor[0] * weight; |
|
|
|
pixelSamples[pixelIdx].push_back(sample); |
|
|
|
c[1] += adjustedColor[1] * weight; |
|
|
|
additionalSamples++; |
|
|
|
c[2] += adjustedColor[2] * weight; |
|
|
|
} |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
DEBUG_EXTRA("补充采样完成: 新增采样点 %d", additionalSamples); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 6. 融合多视图颜色
|
|
|
|
|
|
|
|
DEBUG_EXTRA("融合多视图颜色"); |
|
|
|
|
|
|
|
cv::Mat3f colorAccum(textureSize, textureSize, cv::Vec3f(0, 0, 0)); |
|
|
|
|
|
|
|
cv::Mat1f weightAccum(textureSize, textureSize, 0.0f); |
|
|
|
|
|
|
|
cv::Mat1i sampleCount(textureSize, textureSize, 0); |
|
|
|
|
|
|
|
int fusedPixels = 0; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 添加统计信息
|
|
|
|
|
|
|
|
int mainViewOnly = 0; |
|
|
|
|
|
|
|
int multiViewFused = 0; |
|
|
|
|
|
|
|
int highDiffCount = 0; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
for (int y = 0; y < textureSize; ++y) { |
|
|
|
|
|
|
|
for (int x = 0; x < textureSize; ++x) { |
|
|
|
|
|
|
|
int pixelIdx = y * textureSize + x; |
|
|
|
|
|
|
|
auto& samples = pixelSamples[pixelIdx]; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if (samples.empty()) continue; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 分离主视图和辅助视图
|
|
|
|
|
|
|
|
std::vector<cv::Vec3f> mainViewColors; |
|
|
|
|
|
|
|
std::vector<float> mainViewWeights; |
|
|
|
|
|
|
|
std::vector<cv::Vec3f> auxiliaryColors; |
|
|
|
|
|
|
|
std::vector<float> auxiliaryWeights; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
IIndex primaryView = INVALID_INDEX; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
for (const auto& sample : samples) { |
|
|
|
|
|
|
|
if (primaryView == INVALID_INDEX) { |
|
|
|
|
|
|
|
primaryView = sample.viewIdx; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
faceSampledPixels++; |
|
|
|
if (sample.viewIdx == primaryView) { |
|
|
|
|
|
|
|
// 主视图
|
|
|
|
|
|
|
|
mainViewColors.push_back(sample.color); |
|
|
|
|
|
|
|
mainViewWeights.push_back(sample.weight); |
|
|
|
|
|
|
|
} else { |
|
|
|
|
|
|
|
// 辅助视图
|
|
|
|
|
|
|
|
auxiliaryColors.push_back(sample.color); |
|
|
|
|
|
|
|
auxiliaryWeights.push_back(sample.weight * 0.3f); // 辅助视图权重更低
|
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
if (faceSampledPixels > 0) { |
|
|
|
if (mainViewColors.empty()) continue; |
|
|
|
processedFaces++; |
|
|
|
|
|
|
|
sampledPixels += faceSampledPixels; |
|
|
|
if (auxiliaryColors.empty()) { |
|
|
|
|
|
|
|
mainViewOnly++; |
|
|
|
} else { |
|
|
|
} else { |
|
|
|
failedFaces++; |
|
|
|
multiViewFused++; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 计算主视图的加权平均颜色
|
|
|
|
|
|
|
|
cv::Vec3f mainColor(0, 0, 0); |
|
|
|
|
|
|
|
float mainWeightSum = 0.0f; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
for (size_t i = 0; i < mainViewColors.size(); ++i) { |
|
|
|
|
|
|
|
mainColor += mainViewColors[i] * mainViewWeights[i]; |
|
|
|
|
|
|
|
mainWeightSum += mainViewWeights[i]; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
if (mainWeightSum > 0) { |
|
|
|
|
|
|
|
mainColor /= mainWeightSum; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 如果有辅助视图,进行智能融合
|
|
|
|
|
|
|
|
cv::Vec3f finalColor = mainColor; |
|
|
|
|
|
|
|
float finalWeight = 1.0f; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if (!auxiliaryColors.empty()) { |
|
|
|
|
|
|
|
// 计算辅助视图的加权平均颜色
|
|
|
|
|
|
|
|
cv::Vec3f auxColor(0, 0, 0); |
|
|
|
|
|
|
|
float auxWeightSum = 0.0f; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
for (size_t i = 0; i < auxiliaryColors.size(); ++i) { |
|
|
|
|
|
|
|
auxColor += auxiliaryColors[i] * auxiliaryWeights[i]; |
|
|
|
|
|
|
|
auxWeightSum += auxiliaryWeights[i]; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
if (auxWeightSum > 0) { |
|
|
|
|
|
|
|
auxColor /= auxWeightSum; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 计算颜色差异
|
|
|
|
|
|
|
|
cv::Vec3f colorDiff = mainColor - auxColor; |
|
|
|
|
|
|
|
float colorDiffMag = std::sqrt(colorDiff.dot(colorDiff)); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 如果颜色差异不大,才融合辅助视图
|
|
|
|
|
|
|
|
if (colorDiffMag < 30.0f) { // 颜色差异阈值
|
|
|
|
|
|
|
|
// 根据颜色差异调整融合权重
|
|
|
|
|
|
|
|
float alpha = std::exp(-colorDiffMag * 0.1f); // 差异越大,融合越少
|
|
|
|
|
|
|
|
finalColor = mainColor * (1.0f - alpha) + auxColor * alpha; |
|
|
|
|
|
|
|
finalWeight = 1.0f; |
|
|
|
|
|
|
|
} else { |
|
|
|
|
|
|
|
// 颜色差异太大,只使用主视图
|
|
|
|
|
|
|
|
finalColor = mainColor; |
|
|
|
|
|
|
|
finalWeight = 1.0f; |
|
|
|
|
|
|
|
highDiffCount++; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 应用简单的颜色饱和度增强
|
|
|
|
|
|
|
|
float b = finalColor[0]; |
|
|
|
|
|
|
|
float g = finalColor[1]; |
|
|
|
|
|
|
|
float r = finalColor[2]; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
float maxVal = std::max(r, std::max(g, b)); |
|
|
|
|
|
|
|
float minVal = std::min(r, std::min(g, b)); |
|
|
|
|
|
|
|
float delta = maxVal - minVal; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if (delta > 5.0f) { // 避免对灰度像素处理
|
|
|
|
|
|
|
|
// 增强饱和度
|
|
|
|
|
|
|
|
float saturationBoost = 1.2f; // 20%饱和度增强
|
|
|
|
|
|
|
|
float mean = (r + g + b) / 3.0f; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
r = mean + (r - mean) * saturationBoost; |
|
|
|
|
|
|
|
g = mean + (g - mean) * saturationBoost; |
|
|
|
|
|
|
|
b = mean + (b - mean) * saturationBoost; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 限制在有效范围
|
|
|
|
|
|
|
|
r = std::max(0.0f, std::min(255.0f, r)); |
|
|
|
|
|
|
|
g = std::max(0.0f, std::min(255.0f, g)); |
|
|
|
|
|
|
|
b = std::max(0.0f, std::min(255.0f, b)); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
finalColor = cv::Vec3f(b, g, r); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 存储结果
|
|
|
|
|
|
|
|
colorAccum(y, x) = finalColor; |
|
|
|
|
|
|
|
weightAccum(y, x) = finalWeight; |
|
|
|
|
|
|
|
sampleCount(y, x) = (int)samples.size(); |
|
|
|
|
|
|
|
fusedPixels++; |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 输出进度
|
|
|
|
|
|
|
|
if (y % 100 == 0) { |
|
|
|
|
|
|
|
float progress = (float)y * 100.0f / textureSize; |
|
|
|
|
|
|
|
DEBUG_EXTRA("颜色融合进度: %.1f%% (处理行 %d/%d)", progress, y, textureSize); |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
DEBUG_EXTRA("纹理采样完成: 成功 %d 个面, 失败 %d 个面, 采样 %d 像素", |
|
|
|
DEBUG_EXTRA("颜色融合完成: 融合像素 %d", fusedPixels); |
|
|
|
processedFaces, failedFaces, sampledPixels); |
|
|
|
DEBUG_EXTRA("融合统计: 仅主视图=%d, 多视图融合=%d, 高差异=%d", |
|
|
|
|
|
|
|
mainViewOnly, multiViewFused, highDiffCount); |
|
|
|
|
|
|
|
|
|
|
|
// 6. 应用权重归一化
|
|
|
|
// 7. 生成最终纹理
|
|
|
|
DEBUG_EXTRA("应用权重归一化"); |
|
|
|
DEBUG_EXTRA("生成最终纹理"); |
|
|
|
|
|
|
|
|
|
|
|
for (int y = 0; y < textureSize; ++y) { |
|
|
|
for (int y = 0; y < textureSize; ++y) { |
|
|
|
for (int x = 0; x < textureSize; ++x) { |
|
|
|
for (int x = 0; x < textureSize; ++x) { |
|
|
|
float weight = weightAccum(y, x); |
|
|
|
float weight = weightAccum(y, x); |
|
|
|
if (weight > 0.0f) { |
|
|
|
if (weight > 0.0f) { |
|
|
|
cv::Vec3f avgColor = colorAccum(y, x) / weight; |
|
|
|
cv::Vec3f bgrColor = colorAccum(y, x); |
|
|
|
|
|
|
|
|
|
|
|
// 从BGR转回RGB顺序
|
|
|
|
// 转换为RGB顺序
|
|
|
|
Pixel8U finalColor; |
|
|
|
Pixel8U pixel; |
|
|
|
finalColor.r = (unsigned char)cv::saturate_cast<uchar>(avgColor[2]); // R
|
|
|
|
pixel.r = (unsigned char)cv::saturate_cast<uchar>(bgrColor[2]); // R
|
|
|
|
finalColor.g = (unsigned char)cv::saturate_cast<uchar>(avgColor[1]); // G
|
|
|
|
pixel.g = (unsigned char)cv::saturate_cast<uchar>(bgrColor[1]); // G
|
|
|
|
finalColor.b = (unsigned char)cv::saturate_cast<uchar>(avgColor[0]); // B
|
|
|
|
pixel.b = (unsigned char)cv::saturate_cast<uchar>(bgrColor[0]); // B
|
|
|
|
|
|
|
|
|
|
|
|
textureAtlas(y, x) = finalColor; |
|
|
|
textureAtlas(y, x) = pixel; |
|
|
|
} else { |
|
|
|
} else { |
|
|
|
textureAtlas(y, x) = colEmpty; |
|
|
|
textureAtlas(y, x) = colEmpty; |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
// 7. 应用全局颜色校正
|
|
|
|
// 8. 应用颜色一致性优化
|
|
|
|
if (processedFaces > 0 && sampledPixels > 0) { |
|
|
|
if (fusedPixels > 0) { |
|
|
|
ApplyGlobalColorCorrection(textureAtlas, colEmpty, 0.8f); |
|
|
|
DEBUG_EXTRA("应用颜色一致性优化"); |
|
|
|
|
|
|
|
ApplyColorConsistencyOptimization(textureAtlas, pixelSamples, textureSize, colEmpty); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
// 8. 锐化处理
|
|
|
|
// 9. 填充空白区域
|
|
|
|
if (fSharpnessWeight > 0 && sampledPixels > 0) { |
|
|
|
// FillTextureGaps(textureAtlas, colEmpty);
|
|
|
|
DEBUG_EXTRA("应用锐化处理"); |
|
|
|
|
|
|
|
ApplySoftSharpening(textureAtlas, fSharpnessWeight, colEmpty); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// 9. 最终颜色检查
|
|
|
|
// 10. 应用轻微的颜色校正
|
|
|
|
if (!textureAtlas.empty()) { |
|
|
|
if (fusedPixels > 0) { |
|
|
|
cv::Scalar mean = cv::mean(textureAtlas); |
|
|
|
ApplyGlobalColorCorrection(textureAtlas, colEmpty, 0.3f); |
|
|
|
DEBUG_EXTRA("最终纹理平均颜色: B=%.1f, G=%.1f, R=%.1f (OpenCV BGR顺序)", |
|
|
|
} |
|
|
|
mean[0], mean[1], mean[2]); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
double rSum = 0, gSum = 0, bSum = 0; |
|
|
|
// 11. 锐化处理
|
|
|
|
int count = 0; |
|
|
|
if (fSharpnessWeight > 0 && fusedPixels > 0) { |
|
|
|
|
|
|
|
ApplySoftSharpening(textureAtlas, fSharpnessWeight, colEmpty); |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
for (int y = 0; y < textureAtlas.rows; ++y) { |
|
|
|
// 12. 最终统计
|
|
|
|
for (int x = 0; x < textureAtlas.cols; ++x) { |
|
|
|
int validPixels = 0; |
|
|
|
Pixel8U pixel = textureAtlas(y, x); |
|
|
|
for (int y = 0; y < textureSize; ++y) { |
|
|
|
if (pixel != colEmpty) { |
|
|
|
for (int x = 0; x < textureSize; ++x) { |
|
|
|
rSum += pixel.r; |
|
|
|
if (textureAtlas(y, x) != colEmpty) { |
|
|
|
gSum += pixel.g; |
|
|
|
validPixels++; |
|
|
|
bSum += pixel.b; |
|
|
|
|
|
|
|
count++; |
|
|
|
|
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
if (count > 0) { |
|
|
|
DEBUG_EXTRA("纹理图集生成完成: 有效像素 %d / %d (%.2f%%)", |
|
|
|
DEBUG_EXTRA("有效像素RGB平均值: R=%.1f, G=%.1f, B=%.1f", |
|
|
|
validPixels, textureSize * textureSize, |
|
|
|
rSum / count, gSum / count, bSum / count); |
|
|
|
(float)validPixels * 100 / (textureSize * textureSize)); |
|
|
|
} |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
return textures; |
|
|
|
return textures; |
|
|
|
} |
|
|
|
} |
|
|
|
|