Browse Source

test124

master
hesuicong 5 months ago
parent
commit
4652e3c65c
  1. 446
      scripts/python/MvgMvsPipeline.py
  2. 77
      scripts/python/MvgOptimizeSfM.py
  3. 42
      scripts/python/MvsReadDMAP.py
  4. 35
      scripts/python/MvsReadMVS.py
  5. 180
      scripts/python/MvsScalablePipeline.py
  6. 190
      scripts/python/MvsUtils.py
  7. 126
      scripts/python/blender_decimate.py
  8. 10
      scripts/python/run.sh

446
scripts/python/MvgMvsPipeline.py

@ -0,0 +1,446 @@
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
#
# Created by @FlachyJoe
"""
This script is for an easy use of OpenMVG, COLMAP, and OpenMVS
usage: MvgMvs_Pipeline.py [-h] [--steps STEPS [STEPS ...]] [--preset PRESET]
[--0 0 [0 ...]] [--1 1 [1 ...]] [--2 2 [2 ...]]
[--3 3 [3 ...]] [--4 4 [4 ...]] [--5 5 [5 ...]]
[--6 6 [6 ...]] [--7 7 [7 ...]] [--8 8 [8 ...]]
[--9 9 [9 ...]] [--10 10 [10 ...]] [--11 11 [11 ...]]
[--12 12 [12 ...]] [--13 13 [13 ...]] [--14 14 [14 ...]]
[--15 15 [15 ...]] [--16 16 [16 ...]] [--17 17 [17 ...]]
[--18 18 [18 ...]] [--19 19 [19 ...]] [--20 20 [20 ...]]
[--21 21 [21 ...]] [--22 22 [22 ...]]
input_dir output_dir
Photogrammetry reconstruction with these steps:
0. Intrinsics analysis openMVG_main_SfMInit_ImageListing
1. Compute features openMVG_main_ComputeFeatures
2. Compute pairs openMVG_main_PairGenerator
3. Compute matches openMVG_main_ComputeMatches
4. Filter matches openMVG_main_GeometricFilter
5. Incremental reconstruction openMVG_main_SfM
6. Global reconstruction openMVG_main_SfM
7. Colorize Structure openMVG_main_ComputeSfM_DataColor
8. Structure from Known Poses openMVG_main_ComputeStructureFromKnownPoses
9. Colorized robust triangulation openMVG_main_ComputeSfM_DataColor
10. Control Points Registration ui_openMVG_control_points_registration
11. Export to openMVS openMVG_main_openMVG2openMVS
12. Feature Extractor colmap
13. Exhaustive Matcher colmap
14. Mapper colmap
15. Image Undistorter colmap
16. Export to openMVS InterfaceCOLMAP
17. Densify point-cloud DensifyPointCloud
18. Reconstruct the mesh ReconstructMesh
19. Refine the mesh RefineMesh
20. Texture the mesh TextureMesh
21. Estimate disparity-maps DensifyPointCloud
22. Fuse disparity-maps DensifyPointCloud
positional arguments:
input_dir the directory which contains the pictures set.
output_dir the directory which will contain the resulting files.
optional arguments:
-h, --help show this help message and exit
--steps STEPS [STEPS ...] steps to process
--preset PRESET steps list preset in
SEQUENTIAL = [0, 1, 2, 3, 4, 5, 11, 17, 18, 19, 20]
GLOBAL = [0, 1, 2, 3, 4, 6, 11, 17, 18, 19, 20]
MVG_SEQ = [0, 1, 2, 3, 4, 5, 7, 8, 9, 11]
MVG_GLOBAL = [0, 1, 2, 3, 4, 6, 7, 8, 9, 11]
COLMAP_MVS = [12, 13, 14, 15, 16, 17, 18, 19, 20]
COLMAP = [12, 13, 14, 15, 16]
MVS = [17, 18, 19, 20]
MVS_SGM = [21, 22]
default : SEQUENTIAL
Passthrough:
Option to be passed to command lines (remove - in front of option names)
e.g. --1 p ULTRA to use the ULTRA preset in openMVG_main_ComputeFeatures
For example, running the script
[MvgMvsPipeline.py input_dir output_dir --steps 0 1 2 3 4 5 11 17 18 20 --1 p HIGH n 8 --3 n HNSWL2]
[--steps 0 1 2 3 4 5 11 17 18 20] runs only the desired steps
[--1 p HIGH n 8] where --1 refer to openMVG_main_ComputeFeatures,
p refers to describerPreset option and set to HIGH, and n refers
to numThreads and set to 8. The second step (Compute matches),
[--3 n HNSWL2] where --3 refer to openMVG_main_ComputeMatches,
n refers to nearest_matching_method option and set to HNSWL2
"""
import os
import subprocess
import sys
import argparse
# Enable debug mode conditionally
# When True, the pipeline prints each command line instead of executing it.
DEBUGDEBUG_MODE_ENABLED = False
# Define delimiters based on the operating system
PATH_DELIMITER = ';' if sys.platform.startswith('win') else ':'
FOLDER_DELIMITER = '\\' if sys.platform.startswith('win') else '/'
# Append the script's and current working directory's paths to the system PATH
# so the bundled tools and config files can be found by the locators below.
script_directory = os.path.dirname(os.path.abspath(__file__))
# add this script's directory to PATH
os.environ['PATH'] += PATH_DELIMITER + script_directory
# add current directory to PATH
os.environ['PATH'] += PATH_DELIMITER + os.getcwd()
def locate_executable(afile):
    """Return the directory that contains executable *afile*, or None.

    Delegates to the platform lookup tool ('where' on Windows, 'which'
    elsewhere), which searches the PATH environment variable.
    """
    lookup_tool = "where" if sys.platform.startswith('win') else "which"
    try:
        result = subprocess.run([lookup_tool, afile],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                check=True)
    except subprocess.CalledProcessError:
        return None
    return os.path.split(result.stdout.decode())[0]
def find_file_in_path(afile):
    """Return the first PATH directory that contains *afile*, else None.

    'which'/'where' only report executables; this helper locates any
    file type (e.g. the camera sensor database) anywhere on PATH.
    Uses os.pathsep, the standard-library constant for the PATH entry
    separator (';' on Windows, ':' elsewhere).
    """
    for directory in os.environ['PATH'].split(os.pathsep):
        if os.path.isfile(os.path.join(directory, afile)):
            return directory
    return None
# Attempt to locate binaries for specific software tools
openMVG_binary = locate_executable("openMVG_main_SfMInit_ImageListing")
colmap_binary = locate_executable("colmap")
openMVS_binary = locate_executable("ReconstructMesh")
# Try to find openMVG camera sensor database
camera_sensor_db_file = "sensor_width_camera_database.txt"
camera_sensor_db_directory = find_file_in_path(camera_sensor_db_file)
# Prompt user for directories if software binaries or database files weren't found
# NOTE(review): these input() calls block; running this script non-interactively
# with missing binaries will hang waiting for stdin.
if not openMVG_binary:
    openMVG_binary = input("Directory for openMVG binaries?\n")
if not colmap_binary:
    colmap_binary = input("Directory for COLMAP binaries?\n")
if not openMVS_binary:
    openMVS_binary = input("Directory for openMVS binaries?\n")
if not camera_sensor_db_directory:
    camera_sensor_db_directory = input(f"Directory for the openMVG camera database ({camera_sensor_db_file})?\n")
# COLMAP ships a single binary with sub-commands; build its full path here.
colmap_binary = os.path.join(colmap_binary, "colmap")
# Adjust the binary name for COLMAP on Windows
if sys.platform.startswith('win'):
    colmap_binary += ".bat"
# Define presets for various software tools
# Each preset is a list of step indices into StepsStore below.
SOFTWARE_PRESETS = {'SEQUENTIAL': [0, 1, 2, 3, 4, 5, 11, 17, 18, 19, 20],
                    'GLOBAL': [0, 1, 2, 3, 4, 6, 11, 17, 18, 19, 20],
                    'MVG_SEQ': [0, 1, 2, 3, 4, 5, 7, 8, 9, 11],
                    'MVG_GLOBAL': [0, 1, 2, 3, 4, 6, 7, 8, 9, 11],
                    'COLMAP_MVS': [12, 13, 14, 15, 16, 17, 18, 19, 20],
                    'COLMAP': [12, 13, 14, 15, 16],
                    'MVS': [17, 18, 19, 20],
                    'MVS_SGM': [21, 22],
                    'TEXTURE': [20]}
# Default preset selection
PRESET_DEFAULT = 'COLMAP'
# HELPERS for terminal colors
# ANSI colour indices (added to 30 for foreground, 40 for background).
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
# ANSI SGR effect codes: reset, bold, underline, blink, inverse, hidden.
NO_EFFECT, BOLD, UNDERLINE, BLINK, INVERSE, HIDDEN = (0, 1, 4, 5, 7, 8)
# from Python cookbook, #475186
def has_colors(stream):
    """Return True when *stream* is a TTY that reports colour support."""
    isatty = getattr(stream, "isatty", None)
    if isatty is None or not isatty():
        return False  # auto color only on TTYs
    try:
        import curses
        curses.setupterm()
        return curses.tigetnum("colors") > 2
    except Exception:
        # guess false in case of error
        return False
# Detect colour capability once at import time; printout() reads this flag.
HAS_COLORS = has_colors(sys.stdout)
def printout(text, colour=WHITE, background=BLACK, effect=NO_EFFECT):
    """Write *text* plus CRLF to stdout, wrapped in ANSI colour codes
    when the terminal supports them."""
    if HAS_COLORS:
        colour_on = "\x1b[%d;%d;%dm" % (effect, 30 + colour, 40 + background)
        sys.stdout.write(colour_on + text + "\x1b[0m" + '\r\n')
    else:
        sys.stdout.write(text + '\r\n')
# OBJECTS to store config and data in
class ConfigContainer:
    """Mutable attribute bag for pipeline configuration.

    Attributes (input_dir, output_dir, steps, ...) are attached
    dynamically by argparse via parse_args(namespace=...) and by the
    path-setup code below.
    """

    def __init__(self):
        # No predefined fields; everything is assigned from outside.
        pass
class ProcessStep:
    """One pipeline step: a label, an executable path and its option list."""

    def __init__(self, description, command, options):
        # Short attribute names on purpose: the main loop reads .info/.cmd/.opt.
        self.info, self.cmd, self.opt = description, command, options
class StepsStore:
    """ List of steps with facilities to configure them

    Each entry of steps_data is [description, executable path, option list];
    %var% placeholders in the options are substituted by configure_steps().
    """

    def __init__(self):
        self.steps_data = [
            ["Intrinsics analysis",  # 0
             os.path.join(openMVG_binary, "openMVG_main_SfMInit_ImageListing"),
             ["-i", "%input_dir%", "-o", "%matches_dir%", "-d", "%camera_file_params%"]],
            ["Compute features",  # 1
             os.path.join(openMVG_binary, "openMVG_main_ComputeFeatures"),
             ["-i", "%matches_dir%"+FOLDER_DELIMITER+"sfm_data.json", "-o", "%matches_dir%", "-m", "SIFT"]],
            ["Compute pairs",  # 2
             os.path.join(openMVG_binary, "openMVG_main_PairGenerator"),
             ["-i", "%matches_dir%"+FOLDER_DELIMITER+"sfm_data.json", "-o", "%matches_dir%"+FOLDER_DELIMITER+"pairs.bin"]],
            ["Compute matches",  # 3
             os.path.join(openMVG_binary, "openMVG_main_ComputeMatches"),
             ["-i", "%matches_dir%"+FOLDER_DELIMITER+"sfm_data.json", "-p", "%matches_dir%"+FOLDER_DELIMITER+"pairs.bin", "-o", "%matches_dir%"+FOLDER_DELIMITER+"matches.putative.bin", "-n", "AUTO"]],
            ["Filter matches",  # 4
             os.path.join(openMVG_binary, "openMVG_main_GeometricFilter"),
             ["-i", "%matches_dir%"+FOLDER_DELIMITER+"sfm_data.json", "-m", "%matches_dir%"+FOLDER_DELIMITER+"matches.putative.bin", "-o", "%matches_dir%"+FOLDER_DELIMITER+"matches.f.bin"]],
            ["Incremental reconstruction",  # 5
             os.path.join(openMVG_binary, "openMVG_main_SfM"),
             ["-i", "%matches_dir%"+FOLDER_DELIMITER+"sfm_data.json", "-m", "%matches_dir%", "-o", "%reconstruction_dir%", "-s", "INCREMENTAL"]],
            ["Global reconstruction",  # 6
             os.path.join(openMVG_binary, "openMVG_main_SfM"),
             ["-i", "%matches_dir%"+FOLDER_DELIMITER+"sfm_data.json", "-m", "%matches_dir%", "-o", "%reconstruction_dir%", "-s", "GLOBAL", "-M", "%matches_dir%"+FOLDER_DELIMITER+"matches.e.bin"]],
            ["Colorize Structure",  # 7
             os.path.join(openMVG_binary, "openMVG_main_ComputeSfM_DataColor"),
             ["-i", "%reconstruction_dir%"+FOLDER_DELIMITER+"sfm_data.bin", "-o", "%reconstruction_dir%"+FOLDER_DELIMITER+"colorized.ply"]],
            ["Structure from Known Poses",  # 8
             os.path.join(openMVG_binary, "openMVG_main_ComputeStructureFromKnownPoses"),
             ["-i", "%reconstruction_dir%"+FOLDER_DELIMITER+"sfm_data.bin", "-m", "%matches_dir%", "-f", "%matches_dir%"+FOLDER_DELIMITER+"matches.f.bin", "-o", "%reconstruction_dir%"+FOLDER_DELIMITER+"robust.bin"]],
            ["Colorized robust triangulation",  # 9
             os.path.join(openMVG_binary, "openMVG_main_ComputeSfM_DataColor"),
             ["-i", "%reconstruction_dir%"+FOLDER_DELIMITER+"robust.bin", "-o", "%reconstruction_dir%"+FOLDER_DELIMITER+"robust_colorized.ply"]],
            ["Control Points Registration",  # 10
             os.path.join(openMVG_binary, "ui_openMVG_control_points_registration"),
             ["-i", "%reconstruction_dir%"+FOLDER_DELIMITER+"sfm_data.bin"]],
            ["Export to openMVS",  # 11
             os.path.join(openMVG_binary, "openMVG_main_openMVG2openMVS"),
             ["-i", "%reconstruction_dir%"+FOLDER_DELIMITER+"sfm_data.bin", "-o", "%mvs_dir%"+FOLDER_DELIMITER+"scene.mvs", "-d", "%mvs_dir%"+FOLDER_DELIMITER+"images"]],
            ["Feature Extractor",  # 12
             colmap_binary,
             ["feature_extractor", "--database_path", "%matches_dir%"+FOLDER_DELIMITER+"database.db", "--image_path", "%input_dir%"]],
            ["Exhaustive Matcher",  # 13
             colmap_binary,
             ["exhaustive_matcher", "--database_path", "%matches_dir%"+FOLDER_DELIMITER+"database.db"]],
            ["Mapper",  # 14
             colmap_binary,
             ["mapper", "--database_path", "%matches_dir%"+FOLDER_DELIMITER+"database.db", "--image_path", "%input_dir%", "--output_path", "%reconstruction_dir%"]],
            ["Image Undistorter",  # 15
             colmap_binary,
             ["image_undistorter", "--image_path", "%input_dir%", "--input_path", "%reconstruction_dir%"+FOLDER_DELIMITER+"0", "--output_path", "%reconstruction_dir%"+FOLDER_DELIMITER+"dense", "--output_type", "COLMAP"]],
            ["Export to openMVS",  # 16
             os.path.join(openMVS_binary, "InterfaceCOLMAP"),
             ["-i", "%reconstruction_dir%"+FOLDER_DELIMITER+"dense", "-o", "scene.mvs", "--image-folder", "%reconstruction_dir%"+FOLDER_DELIMITER+"dense"+FOLDER_DELIMITER+"images", "-w", "\"%mvs_dir%\""]],
            ["Densify point cloud",  # 17
             os.path.join(openMVS_binary, "DensifyPointCloud"),
             ["scene.mvs", "--dense-config-file", "Densify.ini", "--resolution-level", "1", "--number-views", "8", "-w", "\"%mvs_dir%\""]],
            ["Reconstruct the mesh",  # 18
             os.path.join(openMVS_binary, "ReconstructMesh"),
             ["scene_dense.mvs", "-p", "scene_dense.ply", "-w", "\"%mvs_dir%\""]],
            ["Refine the mesh",  # 19
             os.path.join(openMVS_binary, "RefineMesh"),
             ["scene_dense.mvs", "-m", "scene_dense_mesh.ply", "-o", "scene_dense_mesh_refine.mvs", "--scales", "1", "--gradient-step", "25.05", "-w", "\"%mvs_dir%\""]],
            ["Texture the mesh",  # 20
             os.path.join(openMVS_binary, "TextureMesh"),
             ["scene_dense.mvs", "-m", "scene_dense_mesh_refine.ply", "-o", "scene_dense_mesh_refine_texture.mvs", "--decimate", "0.5", "-w", "\"%mvs_dir%\""]],
            ["Estimate disparity-maps",  # 21
             os.path.join(openMVS_binary, "DensifyPointCloud"),
             ["scene.mvs", "--dense-config-file", "Densify.ini", "--fusion-mode", "-1", "-w", "\"%mvs_dir%\""]],
            ["Fuse disparity-maps",  # 22
             os.path.join(openMVS_binary, "DensifyPointCloud"),
             ["scene.mvs", "--dense-config-file", "Densify.ini", "--fusion-mode", "-2", "-w", "\"%mvs_dir%\""]]
        ]

    def __getitem__(self, indice):
        # Returns a fresh ProcessStep wrapper; note that .opt still references
        # the stored option list, so in-place mutations persist in steps_data.
        return ProcessStep(*self.steps_data[indice])

    def length(self):
        # Number of available pipeline steps.
        return len(self.steps_data)

    def configure_steps(self, conf):
        """ replace each %var% per conf.var value in steps data """
        for step in self.steps_data:
            updated_options = []
            for option in step[2]:
                configured_option = option.replace("%input_dir%", conf.input_dir)
                configured_option = configured_option.replace("%output_dir%", conf.output_dir)
                configured_option = configured_option.replace("%matches_dir%", conf.matches_dir)
                configured_option = configured_option.replace("%reconstruction_dir%", conf.reconstruction_dir)
                configured_option = configured_option.replace("%mvs_dir%", conf.mvs_dir)
                configured_option = configured_option.replace("%camera_file_params%", conf.camera_file_params)
                updated_options.append(configured_option)
            step[2] = updated_options

    def replace_option(self, idx, str_exist, str_new):
        """ replace each existing str_exist with str_new per opt value in step idx data """
        step = self.steps_data[idx]
        updated_options = []
        for option in step[2]:
            configured_option = option.replace(str_exist, str_new)
            updated_options.append(configured_option)
        step[2] = updated_options
# Shared containers: CONF collects the CLI arguments, STEPS the command table.
CONF = ConfigContainer()
STEPS = StepsStore()
# ARGS
PARSER = argparse.ArgumentParser(
    formatter_class=argparse.RawTextHelpFormatter,
    description="Photogrammetry reconstruction with these steps: \r\n" +
    "\r\n".join(("\t%i. %s\t %s" % (t, STEPS[t].info, STEPS[t].cmd) for t in range(STEPS.length())))
)
PARSER.add_argument('input_dir',
                    help="the directory which contains the pictures set.")
PARSER.add_argument('output_dir',
                    help="the directory which will contain the resulting files.")
PARSER.add_argument('--steps',
                    type=int,
                    nargs="+",
                    help="Specify steps to process by index.")
PARSER.add_argument('--preset',
                    help="steps list preset in \r\n" +
                    " \r\n".join([k + " = " + str(SOFTWARE_PRESETS[k]) for k in SOFTWARE_PRESETS]) +
                    " \r\ndefault : " + PRESET_DEFAULT)
GROUP = PARSER.add_argument_group('Passthrough', description="Option to be passed to command lines (remove - in front of option names)\r\ne.g. --1 p ULTRA to use the ULTRA preset in openMVG_main_ComputeFeatures\r\nFor example, running the script as follows,\r\nMvgMvsPipeline.py input_dir output_dir --1 p HIGH n 8 --3 n ANNL2\r\nwhere --1 refer to openMVG_main_ComputeFeatures, p refers to\r\ndescriberPreset option which HIGH was chosen, and n refers to\r\nnumThreads which 8 was used. --3 refer to second step (openMVG_main_ComputeMatches),\r\nn refers to nearest_matching_method option which ANNL2 was chosen")
# One passthrough option per step index: e.g. --1 collects extra args for step 1.
for n in range(STEPS.length()):
    GROUP.add_argument('--'+str(n), nargs='+')
PARSER.parse_args(namespace=CONF)  # store args in the ConfContainer
def ensure_directory_exists(dirname):
"""Create the folder if not presents"""
if not os.path.exists(dirname):
os.mkdir(dirname)
# Absolute path for input and output dirs
CONF.input_dir = os.path.abspath(CONF.input_dir)
CONF.output_dir = os.path.abspath(CONF.output_dir)
if not os.path.exists(CONF.input_dir):
    sys.exit("%s: path not found" % CONF.input_dir)
# Derived output layout: <output>/sfm, <output>/sfm/matches, <output>/mvs.
CONF.reconstruction_dir = os.path.join(CONF.output_dir, "sfm")
CONF.matches_dir = os.path.join(CONF.reconstruction_dir, "matches")
CONF.mvs_dir = os.path.join(CONF.output_dir, "mvs")
CONF.camera_file_params = os.path.join(camera_sensor_db_directory, camera_sensor_db_file)
ensure_directory_exists(CONF.output_dir)
ensure_directory_exists(CONF.reconstruction_dir)
ensure_directory_exists(CONF.matches_dir)
ensure_directory_exists(CONF.mvs_dir)
# Update directories in steps commandlines
STEPS.configure_steps(CONF)
# PRESET
# --steps and --preset are mutually exclusive; default preset when neither given.
if CONF.steps and CONF.preset:
    sys.exit("Steps and preset arguments can't be set together.")
elif CONF.preset:
    try:
        CONF.steps = SOFTWARE_PRESETS[CONF.preset]
    except KeyError:
        sys.exit("Unknown preset %s, choose %s" % (CONF.preset, ' or '.join([s for s in SOFTWARE_PRESETS])))
elif not CONF.steps:
    CONF.steps = SOFTWARE_PRESETS[PRESET_DEFAULT]
# WALK
print("# Using input dir: %s" % CONF.input_dir)
print("# output dir: %s" % CONF.output_dir)
print("# Steps: %s" % str(CONF.steps))
if 4 in CONF.steps:  # GeometricFilter
    if 6 in CONF.steps:  # GlobalReconstruction
        # Set the geometric_model of ComputeMatches to Essential
        STEPS.replace_option(4, FOLDER_DELIMITER+"matches.f.bin", FOLDER_DELIMITER+"matches.e.bin")
        STEPS[4].opt.extend(["-g", "e"])
if 20 in CONF.steps:  # TextureMesh
    if 19 not in CONF.steps:  # RefineMesh
        # RefineMesh step is not run, use ReconstructMesh output
        STEPS.replace_option(20, "scene_dense_mesh_refine.ply", "scene_dense_mesh.ply")
        STEPS.replace_option(20, "scene_dense_mesh_refine_texture.mvs", "scene_dense_mesh_texture.mvs")
for cstep in CONF.steps:
    printout("#%i. %s" % (cstep, STEPS[cstep].info), effect=INVERSE)
    # Retrieve "passthrough" commandline options
    options = getattr(CONF, str(cstep))
    if options:
        # add - sign to short options and -- to long ones
        for o in range(0, len(options), 2):
            if len(options[o]) > 1:
                options[o] = '-' + options[o]
            options[o] = '-' + options[o]
    else:
        options = []
    # Remove STEPS[cstep].opt options now defined in opt
    # NOTE(review): this deletes slices from the list while iterating over it,
    # which can skip the element that follows each removal — confirm intended.
    for anOpt in STEPS[cstep].opt:
        if anOpt in options:
            idx = STEPS[cstep].opt.index(anOpt)
            if DEBUGDEBUG_MODE_ENABLED:
                print('#\tRemove ' + str(anOpt) + ' from defaults options at id ' + str(idx))
            del STEPS[cstep].opt[idx:idx+2]
    # create a commandline for the current step
    cmdline = [STEPS[cstep].cmd] + STEPS[cstep].opt + options
    print('CMD: ' + ' '.join(cmdline))
    if not DEBUGDEBUG_MODE_ENABLED:
        # Launch the current step
        try:
            pStep = subprocess.Popen(cmdline)
            pStep.wait()
            if pStep.returncode != 0:
                # Abort the whole pipeline on the first failing step.
                break
        except KeyboardInterrupt:
            sys.exit('\r\nProcess canceled by user, all files remains')
    else:
        # Debug mode: just show the command instead of running it.
        print('\t'.join(cmdline))
printout("# Pipeline end #", effect=INVERSE)

77
scripts/python/MvgOptimizeSfM.py

@ -0,0 +1,77 @@
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""
This script is for comparing the poses stored in an OpenMVS project to the poses optimized by OpenMVG
usage: run 'MvgOptimizeSfM.py' in a sub-folder to the OpenMVS project folder containing
'scene.mvs' and images stored in 'images' folder; structure ex:
-OpenMVS_project
-images
-scene.mvs
-mvg
-run script here
"""
import os
import sys
import subprocess
# PATH entries are ';'-separated on Windows, ':'-separated elsewhere.
if sys.platform.startswith('win'):
    PATH_DELIM = ';'
else:
    PATH_DELIM = ':'
# add this script's directory to PATH
os.environ['PATH'] += PATH_DELIM + os.path.dirname(os.path.abspath(__file__))
# add current directory to PATH
os.environ['PATH'] += PATH_DELIM + os.getcwd()
def whereis(afile):
    """Return the directory holding executable *afile*, or None if absent.

    Relies on the platform lookup command ('where' on Windows, 'which'
    elsewhere), which scans the PATH environment variable.
    """
    finder = "where" if sys.platform.startswith('win') else "which"
    try:
        ret = subprocess.run([finder, afile],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             check=True)
    except subprocess.CalledProcessError:
        return None
    return os.path.split(ret.stdout.decode())[0]
def launch(cmdline):
    """Echo and run one pipeline command, waiting for it to finish.

    Returns None whether or not the command succeeds; a
    KeyboardInterrupt aborts the whole script but keeps all files.
    """
    print('Cmd: ' + ' '.join(cmdline))
    try:
        returncode = subprocess.call(cmdline)
        if returncode != 0:
            return
    except KeyboardInterrupt:
        sys.exit('\r\nProcess canceled by user, all files remains')
# Try to find openMVG and openMVS binaries in PATH
OPENMVG_BIN = whereis("openMVG_main_SfMInit_ImageListing")
OPENMVS_BIN = whereis("ReconstructMesh")
# Ask user for openMVG and openMVS directories if not found
# NOTE(review): input() blocks if this script runs unattended.
if not OPENMVG_BIN:
    OPENMVG_BIN = input("openMVG binary folder?\n")
if not OPENMVS_BIN:
    OPENMVS_BIN = input("openMVS binary folder?\n")
# Export the parent project's cameras, then rebuild an OpenMVG scene from
# the known poses, re-triangulate and colorize it.
launch([os.path.join(OPENMVS_BIN, 'InterfaceCOLMAP'), '../scene.mvs', '-o', '../gt_dense_cameras.camera'])
launch([os.path.join(OPENMVG_BIN, 'openMVG_main_SfMInit_ImageListingFromKnownPoses'), '-i', '../images', '-g', '../gt_dense_cameras', '-t', '1', '-o', '.'])
launch([os.path.join(OPENMVG_BIN, 'openMVG_main_ComputeFeatures'), '-i', 'sfm_data.json', '-o', '.'])
launch([os.path.join(OPENMVG_BIN, 'openMVG_main_ComputeMatches'), '-i', 'sfm_data.json', '-o', '.', '-m', '1'])
launch([os.path.join(OPENMVG_BIN, 'openMVG_main_ComputeStructureFromKnownPoses'), '-i', 'sfm_data.json', '-m', '.', '-o', 'sfm_data_struct.bin', '-b'])
launch([os.path.join(OPENMVG_BIN, 'openMVG_main_ComputeSfM_DataColor'), '-i', 'sfm_data_struct.bin', '-o', 'scene.ply'])
# Convert back to OpenMVS format and compare against the original project.
launch([os.path.join(OPENMVG_BIN, 'openMVG_main_openMVG2openMVS'), '-i', 'sfm_data_struct.bin', '-o', 'scene.mvs', '-d', 'images'])
launch([os.path.join(OPENMVS_BIN, 'InterfaceCOLMAP'), 'scene.mvs', '-o', 'cameras.camera'])
launch([os.path.join(OPENMVG_BIN, 'openMVG_main_evalQuality'), '-i', '..', '-c', 'sfm_data_struct.bin', '-o', 'compare'])

42
scripts/python/MvsReadDMAP.py

@ -0,0 +1,42 @@
'''
Example usage of MvsUtils.py for reading DMAP file content.
usage: MvsReadDMAP.py [-h] [--input INPUT] [--output OUTPUT]
'''
from argparse import ArgumentParser
from concurrent.futures import ProcessPoolExecutor
from glob import glob
from MvsUtils import loadDMAP
import numpy as np
import os
import pyvips
def exportDMAPContent(dmap_path):
    """Export the maps stored in one DMAP file as PNG images.

    Writes <name>_depth_map.png and, when present, normal and confidence
    maps into the current working directory.
    """
    dmap = loadDMAP(dmap_path)
    basename = os.path.splitext(os.path.basename(dmap['file_name']))[0]
    # Depth scaled into [0, 255] by the stored maximum depth.
    pyvips.Image.new_from_array(np.uint8(dmap['depth_map'] * (1 / dmap['depth_max']) * 255)).write_to_file('%s_depth_map.png' % basename)
    if dmap['has_normal']:
        # Presumably rotates normals by -R into the camera frame and maps
        # [-1, 1] -> [0, 255] — TODO confirm against OpenMVS conventions.
        pyvips.Image.new_from_array(np.uint8((dmap['normal_map'] @ -dmap['R'] + 1) * 0.5 * 255)).write_to_file('%s_normal_map.png' % basename)
    if dmap['has_conf']:
        # Confidence normalized by its own maximum value.
        pyvips.Image.new_from_array(np.uint8(dmap['confidence_map'] * (1 / dmap['confidence_map'].max()) * 255)).write_to_file('%s_confidence_map.png' % basename)
def main():
    """Convert every *.dmap file in the input directory to PNG images."""
    parser = ArgumentParser()
    parser.add_argument('-i', '--input', type=str, required=True, help='Path to the DMAP file directory')
    # Clamp the default so machines with few CPUs never get a worker count
    # of 0 or less, which ProcessPoolExecutor rejects with ValueError.
    parser.add_argument('-t', '--threads', type=int, default=max(1, int(os.cpu_count() * 0.5) - 1), help='Number of parallel computations')
    parser.add_argument('-o', '--output', type=str, required=True, help='Path to the output directory')
    args = parser.parse_args()
    dmap_paths = glob(os.path.join(args.input, '*.dmap'))
    os.makedirs(args.output, exist_ok=True)
    # exportDMAPContent writes its PNGs relative to the working directory.
    os.chdir(args.output)
    with ProcessPoolExecutor(max_workers=args.threads) as executor:
        executor.map(exportDMAPContent, dmap_paths)


if __name__ == '__main__':
    main()

35
scripts/python/MvsReadMVS.py

@ -0,0 +1,35 @@
'''
Example usage of MvsUtils.py for reading MVS interface archive content.
usage: MvsReadMVS.py [-h] [--input INPUT] [--output OUTPUT]
'''
from argparse import ArgumentParser
import json
from MvsUtils import loadMVSInterface
import os
def main():
    """Print camera summaries from an MVS archive and dump it as JSON."""
    parser = ArgumentParser()
    parser.add_argument('-i', '--input', type=str, required=True, help='Path to the MVS interface archive')
    parser.add_argument('-o', '--output', type=str, required=True, help='Path to the output json file')
    args = parser.parse_args()
    mvs = loadMVSInterface(args.input)
    for platform_index, platform in enumerate(mvs['platforms']):
        for camera_index, camera in enumerate(platform['cameras']):
            # Focal lengths normalized by the larger image dimension.
            image_max = max(camera['width'], camera['height'])
            fx = camera['K'][0][0] / image_max
            fy = camera['K'][1][1] / image_max
            poses_size = len(camera['poses'])
            print('Camera model loaded: platform {}; camera {}; f {:.3f}x{:.3f}; poses {}'.format(platform_index, camera_index, fx, fy, poses_size))
    # os.makedirs('') raises FileNotFoundError, so only create the parent
    # directory when the output path actually contains one.
    output_dir = os.path.dirname(args.output)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    with open(args.output, 'w') as file:
        json.dump(mvs, file, indent=2)


if __name__ == '__main__':
    main()

180
scripts/python/MvsScalablePipeline.py

@ -0,0 +1,180 @@
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
"""
This script helps with OpenMVS scalable pipeline.
Starting from a SfM solution stored into a MVS project accompanied by the undistorted images,
the first step is to compute all depth maps without fusion:
DensifyPointCloud scene.mvs --fusion-mode 1
Next split the scene in sub-scenes using the area parameter, which is related to the inverse of GSD;
it is a bit non intuitive, but normally it should remain constant for a desired memory limit;
for example you can use the below value to limit memory usage to ~16GB:
DensifyPointCloud scene.mvs --sub-scene-area 660000
disable depth-maps re-filtering by creating a file Densify.ini with just this line:
Optimize = 0
and call fusion on each of the sub-scenes like:
DensifyPointCloud scene_0000.mvs --dense-config-file Densify.ini
............
DensifyPointCloud scene_000n.mvs --dense-config-file Densify.ini
This script helps to automate the process of calling DensifyPointCloud/ReconstructMesh on all sub-scenes.
usage: MvsScalablePipeline.py openMVS_module input_scene <options>
ex: MvsScalablePipeline.py DensifyPointCloud scene_XXXX.mvs --number-views-fuse 2
"""
import os
import subprocess
import sys
import argparse
import glob
# When True, commands are printed instead of executed.
DEBUG = False
# PATH-entry and folder separators differ between Windows and POSIX.
if sys.platform.startswith('win'):
    PATH_DELIM = ';'
    FOLDER_DELIM = '\\'
else:
    PATH_DELIM = ':'
    FOLDER_DELIM = '/'
def whereis(afile):
    """Return the directory in which executable *afile* lives, or None.

    Uses the platform's own lookup command ('where' on Windows, 'which'
    elsewhere) to search PATH.
    """
    locator = "where" if sys.platform.startswith('win') else "which"
    try:
        probe = subprocess.run([locator, afile],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               check=True)
    except subprocess.CalledProcessError:
        return None
    return os.path.split(probe.stdout.decode())[0]
def find(afile):
    """Return the first PATH directory that contains *afile*, else None.

    Complements whereis(), which only reports executables on Linux —
    this matches any file type.
    """
    hits = (d for d in os.environ['PATH'].split(PATH_DELIM)
            if os.path.isfile(os.path.join(d, afile)))
    return next(hits, None)
# Try to find openMVS binaries in PATH
OPENMVS_BIN = whereis("ReconstructMesh")
# Ask user for openMVS directory if not found
# NOTE(review): input() blocks when the script runs non-interactively.
if not OPENMVS_BIN:
    OPENMVS_BIN = input("openMVS binary folder?\n")
# HELPERS for terminal colors
# ANSI colour indices (added to 30 for foreground, 40 for background).
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
# ANSI SGR effect codes: reset, bold, underline, blink, inverse, hidden.
NO_EFFECT, BOLD, UNDERLINE, BLINK, INVERSE, HIDDEN = (0, 1, 4, 5, 7, 8)
# from Python cookbook, #475186
def has_colours(stream):
    """Return True when *stream* is a colour-capable TTY."""
    isatty = getattr(stream, "isatty", None)
    if isatty is None or not isatty():
        return False  # auto color only on TTYs
    try:
        import curses
        curses.setupterm()
        return curses.tigetnum("colors") > 2
    except Exception:
        # guess false in case of error
        return False
# Detect colour capability once at import time; printout() reads this flag.
HAS_COLOURS = has_colours(sys.stdout)
def printout(text, colour=WHITE, background=BLACK, effect=NO_EFFECT):
    """Write *text* plus CRLF to stdout, in ANSI colour when supported."""
    if HAS_COLOURS:
        style = "\x1b[%d;%d;%dm" % (effect, 30 + colour, 40 + background)
        sys.stdout.write(style + text + "\x1b[0m" + '\r\n')
    else:
        sys.stdout.write(text + '\r\n')
# store config and data in
class ConfContainer:
    """Plain attribute bag; argparse fills it via parse_args(namespace=...)."""

    def __init__(self):
        # Attributes (openMVS_module, input_scene, passthrough) are set later.
        pass
CONF = ConfContainer()
# ARGS
PARSER = argparse.ArgumentParser(
    formatter_class=argparse.RawTextHelpFormatter,
    description="Scalable MVS reconstruction with these steps: \r\n" +
    "MvsScalablePipeline.py openMVS_module input_scene <options>\r\n"
)
PARSER.add_argument('openMVS_module',
                    help="the OpenMVS module to use: DensifyPointCloud, ReconstructMesh, etc.")
PARSER.add_argument('input_scene',
                    help="the scene name reg to process: scene_XXXX.mvs")
PARSER.add_argument('passthrough', nargs=argparse.REMAINDER, help="Option to be passed to command lines")
PARSER.parse_args(namespace=CONF)  # store args in the ConfContainer
# Everything after 'scene_XXXX' in the name (e.g. '_dense.mvs') selects
# which stage's sub-scene files to iterate over.
suffix = os.path.basename(CONF.input_scene).replace('scene_XXXX','')
CONF.input_scene = CONF.input_scene.replace('_dense','').replace('_mesh','').replace('_refine','').replace('_texture','')
# Absolute path for input directory
if len(CONF.input_scene) < 10 or CONF.input_scene[-9:] != '_XXXX.mvs':
    sys.exit("%s: invalid scene name" % CONF.input_scene)
# Output suffix each module produces; used below to skip sub-scenes that
# were already processed.  (match/case requires Python 3.10+.)
match CONF.openMVS_module:
    case 'ReconstructMesh':
        moduleSuffix = '_mesh.mvs'
    case 'RefineMesh':
        moduleSuffix = '_refine.mvs'
    case 'TextureMesh':
        moduleSuffix = '_texture.mvs'
    case _:
        moduleSuffix = '_dense.mvs'
printout("# Module {} start #".format(CONF.openMVS_module), colour=RED, effect=BOLD)
# Process every sub-scene scene_0000..scene_NNNN that matches the suffix.
for scene_name in glob.glob(os.path.abspath(os.path.join(os.path.dirname(CONF.input_scene), 'scene_[0-9][0-9][0-9][0-9]'+suffix))):
    if os.path.exists(os.path.splitext(scene_name)[0] + moduleSuffix) == False:
        printout("# Process: %s" % os.path.basename(scene_name), colour=GREEN, effect=NO_EFFECT)
        # create a commandline for the current step
        cmdline = [os.path.join(OPENMVS_BIN, CONF.openMVS_module), scene_name] + CONF.passthrough
        print('Cmd: ' + ' '.join(cmdline))
        if not DEBUG:
            # Launch the current step
            try:
                pStep = subprocess.Popen(cmdline)
                pStep.wait()
                if pStep.returncode != 0:
                    # Failure is reported but the remaining sub-scenes still run.
                    printout("# Warning: step failed", colour=RED, effect=BOLD)
            except KeyboardInterrupt:
                sys.exit('\r\nProcess canceled by user, all files remains')
        else:
            # Debug mode: just show the command instead of running it.
            print('\t'.join(cmdline))
printout("# Module {} end #".format(CONF.openMVS_module), colour=RED, effect=BOLD)

190
scripts/python/MvsUtils.py

@ -0,0 +1,190 @@
'''
OpenMVS python utilities.
E.g., from MvsUtils import loadDMAP, loadMVSInterface
'''
import numpy as np
def loadDMAP(dmap_path):
    """Parse an OpenMVS .dmap depth-map file into a dict.

    Returns a dict holding camera data (K, R, C), image/depth-map sizes,
    the depth range, view ids, the depth map, and any optional normal /
    confidence / views maps.  Prints an error and returns None implicitly
    when the header is not a valid depth-data file.
    """
    with open(dmap_path, 'rb') as dmap:
        # Header: 2-byte magic 'DR', 1-byte content-type bitmask, 1 reserved byte.
        file_type = dmap.read(2).decode()
        content_type = np.frombuffer(dmap.read(1), dtype=np.dtype('B'))
        reserve = np.frombuffer(dmap.read(1), dtype=np.dtype('B'))
        # content_type is a length-1 uint8 array; the list memberships below
        # enumerate the bitmask values with each optional-map flag set.
        has_depth = content_type > 0
        has_normal = content_type in [3, 7, 11, 15]
        has_conf = content_type in [5, 7, 13, 15]
        has_views = content_type in [9, 11, 13, 15]
        # Two pairs of uint32: full image size, then depth-map size.
        image_width, image_height = np.frombuffer(dmap.read(8), dtype=np.dtype('I'))
        depth_width, depth_height = np.frombuffer(dmap.read(8), dtype=np.dtype('I'))
        if (file_type != 'DR' or has_depth == False or depth_width <= 0 or depth_height <= 0 or image_width < depth_width or image_height < depth_height):
            print('error: opening file \'{}\' for reading depth-data'.format(dmap_path))
            return
        depth_min, depth_max = np.frombuffer(dmap.read(8), dtype=np.dtype('f'))
        # Length-prefixed (uint16) name of the source image file.
        file_name_size = np.frombuffer(dmap.read(2), dtype=np.dtype('H'))[0]
        file_name = dmap.read(file_name_size).decode()
        # uint32 count, then that many uint32 view ids: reference view first.
        view_ids_size = np.frombuffer(dmap.read(4), dtype=np.dtype('I'))[0]
        reference_view_id, *neighbor_view_ids = np.frombuffer(dmap.read(4 * view_ids_size), dtype=np.dtype('I'))
        # Camera data: K and R are 3x3 float64 (72 bytes each), C is 3 float64.
        K = np.frombuffer(dmap.read(72), dtype=np.dtype('d')).reshape(3, 3)
        R = np.frombuffer(dmap.read(72), dtype=np.dtype('d')).reshape(3, 3)
        C = np.frombuffer(dmap.read(24), dtype=np.dtype('d'))
        data = {
            'has_normal': has_normal,
            'has_conf': has_conf,
            'has_views': has_views,
            'image_width': image_width,
            'image_height': image_height,
            'depth_width': depth_width,
            'depth_height': depth_height,
            'depth_min': depth_min,
            'depth_max': depth_max,
            'file_name': file_name,
            'reference_view_id': reference_view_id,
            'neighbor_view_ids': neighbor_view_ids,
            'K': K,
            'R': R,
            'C': C
        }
        # Maps follow in order: depth (float32), then the optional maps.
        map_size = depth_width * depth_height
        depth_map = np.frombuffer(dmap.read(4 * map_size), dtype=np.dtype('f')).reshape(depth_height, depth_width)
        data.update({'depth_map': depth_map})
        if has_normal:
            # 3 float32 components per pixel.
            normal_map = np.frombuffer(dmap.read(4 * map_size * 3), dtype=np.dtype('f')).reshape(depth_height, depth_width, 3)
            data.update({'normal_map': normal_map})
        if has_conf:
            confidence_map = np.frombuffer(dmap.read(4 * map_size), dtype=np.dtype('f')).reshape(depth_height, depth_width)
            data.update({'confidence_map': confidence_map})
        if has_views:
            # 4 uint8 view indices per pixel.
            views_map = np.frombuffer(dmap.read(map_size * 4), dtype=np.dtype('B')).reshape(depth_height, depth_width, 4)
            data.update({'views_map': views_map})
    return data
def loadMVSInterface(archive_path):
    """Parse an OpenMVS scene archive ('MVSI' stream) into a plain dict.

    Reads the binary MVS Interface stream sequentially: header, platforms
    (with cameras and poses), images, sparse point cloud (vertices with
    per-view visibility), and — depending on the stream version — lines,
    a global transform, and an oriented bounding box.

    Args:
        archive_path: path to the .mvs scene file.

    Returns:
        dict with the parsed scene, or None (implicit) when the magic tag
        is not 'MVSI'.
    """
    with open(archive_path, 'rb') as mvs:
        # Header: 4-byte magic tag, uint32 version, uint32 reserved word.
        archive_type = mvs.read(4).decode()
        version = np.frombuffer(mvs.read(4), dtype=np.dtype('I')).tolist()[0]
        reserve = np.frombuffer(mvs.read(4), dtype=np.dtype('I'))  # reserved header word; unused
        if archive_type != 'MVSI':
            print('error: opening file \'{}\''.format(archive_path))
            return
        data = {
            'project_stream': archive_type,
            'project_stream_version': version,
            'platforms': [],
            'images': [],
            'vertices': [],
            'vertices_normal': [],
            'vertices_color': []
        }
        # --- Platforms: each holds a name and a list of cameras. ---
        # Counts are uint64 ('Q'); all scalar fields are little-endian.
        platforms_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
        for platform_index in range(platforms_size):
            platform_name_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
            platform_name = mvs.read(platform_name_size).decode()
            data['platforms'].append({'name': platform_name, 'cameras': []})
            cameras_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
            for camera_index in range(cameras_size):
                camera_name_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
                camera_name = mvs.read(camera_name_size).decode()
                data['platforms'][platform_index]['cameras'].append({'name': camera_name})
                if version > 3:
                    # Band name added in stream version 4.
                    band_name_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
                    band_name = mvs.read(band_name_size).decode()
                    data['platforms'][platform_index]['cameras'][camera_index].update({'band_name': band_name})
                if version > 0:
                    # Camera resolution added in stream version 1.
                    width, height = np.frombuffer(mvs.read(8), dtype=np.dtype('I')).tolist()
                    data['platforms'][platform_index]['cameras'][camera_index].update({'width': width, 'height': height})
                # Intrinsics: 3x3 double matrix (72 bytes).
                K = np.asarray(np.frombuffer(mvs.read(72), dtype=np.dtype('d'))).reshape(3, 3).tolist()
                data['platforms'][platform_index]['cameras'][camera_index].update({'K': K, 'poses': []})
                # 4x3 double block read to advance the stream; value unused here.
                identity_matrix = np.asarray(np.frombuffer(mvs.read(96), dtype=np.dtype('d'))).reshape(4, 3)
                poses_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
                for _ in range(poses_size):
                    # Pose: rotation R (3x3 double) and camera center C (3 doubles).
                    R = np.asarray(np.frombuffer(mvs.read(72), dtype=np.dtype('d'))).reshape(3, 3).tolist()
                    C = np.asarray(np.frombuffer(mvs.read(24), dtype=np.dtype('d'))).tolist()
                    data['platforms'][platform_index]['cameras'][camera_index]['poses'].append({'R': R, 'C': C})
        # --- Images: file name plus platform/camera/pose references. ---
        images_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
        for image_index in range(images_size):
            name_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
            name = mvs.read(name_size).decode()
            data['images'].append({'name': name})
            if version > 4:
                # Optional mask image name added in stream version 5.
                mask_name_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
                mask_name = mvs.read(mask_name_size).decode()
                data['images'][image_index].update({'mask_name': mask_name})
            platform_id, camera_id, pose_id = np.frombuffer(mvs.read(12), dtype=np.dtype('I')).tolist()
            data['images'][image_index].update({'platform_id': platform_id, 'camera_id': camera_id, 'pose_id': pose_id})
            if version > 2:
                # Explicit image ID added in stream version 3.
                id = np.frombuffer(mvs.read(4), dtype=np.dtype('I')).tolist()[0]
                data['images'][image_index].update({'id': id})
            if version > 6:
                # Per-image depth stats and neighbor view scores (version 7+).
                min_depth, avg_depth, max_depth = np.frombuffer(mvs.read(12), dtype=np.dtype('f')).tolist()
                data['images'][image_index].update({'min_depth': min_depth, 'avg_depth': avg_depth, 'max_depth': max_depth, 'view_scores': []})
                view_score_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
                for _ in range(view_score_size):
                    id, points = np.frombuffer(mvs.read(8), dtype=np.dtype('I')).tolist()
                    scale, angle, area, score = np.frombuffer(mvs.read(16), dtype=np.dtype('f')).tolist()
                    data['images'][image_index]['view_scores'].append({'id': id, 'points': points, 'scale': scale, 'angle': angle, 'area': area, 'score': score})
        # --- Sparse point cloud: position X plus per-view visibility. ---
        vertices_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
        for vertex_index in range(vertices_size):
            X = np.frombuffer(mvs.read(12), dtype=np.dtype('f')).tolist()
            data['vertices'].append({'X': X, 'views': []})
            views_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
            for _ in range(views_size):
                image_id = np.frombuffer(mvs.read(4), dtype=np.dtype('I')).tolist()[0]
                confidence = np.frombuffer(mvs.read(4), dtype=np.dtype('f')).tolist()[0]
                data['vertices'][vertex_index]['views'].append({'image_id': image_id, 'confidence': confidence})
        # Optional per-vertex normals (3 floats each).
        vertices_normal_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
        for _ in range(vertices_normal_size):
            normal = np.frombuffer(mvs.read(12), dtype=np.dtype('f')).tolist()
            data['vertices_normal'].append(normal)
        # Optional per-vertex colors (3 bytes each, presumably RGB — TODO confirm).
        vertices_color_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
        for _ in range(vertices_color_size):
            color = np.frombuffer(mvs.read(3), dtype=np.dtype('B')).tolist()
            data['vertices_color'].append(color)
        if version > 0:
            # --- Line features (version 1+): endpoints, visibility, normals, colors. ---
            data.update({'lines': [], 'lines_normal': [], 'lines_color': []})
            lines_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
            for line_index in range(lines_size):
                pt1 = np.frombuffer(mvs.read(12), dtype=np.dtype('f')).tolist()
                pt2 = np.frombuffer(mvs.read(12), dtype=np.dtype('f')).tolist()
                data['lines'].append({'pt1': pt1, 'pt2': pt2, 'views': []})
                views_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
                for _ in range(views_size):
                    image_id = np.frombuffer(mvs.read(4), dtype=np.dtype('I')).tolist()[0]
                    confidence = np.frombuffer(mvs.read(4), dtype=np.dtype('f')).tolist()[0]
                    data['lines'][line_index]['views'].append({'image_id': image_id, 'confidence': confidence})
            lines_normal_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
            for _ in range(lines_normal_size):
                normal = np.frombuffer(mvs.read(12), dtype=np.dtype('f')).tolist()
                data['lines_normal'].append(normal)
            lines_color_size = np.frombuffer(mvs.read(8), dtype=np.dtype('Q'))[0]
            for _ in range(lines_color_size):
                color = np.frombuffer(mvs.read(3), dtype=np.dtype('B')).tolist()
                data['lines_color'].append(color)
        if version > 1:
            # Global 4x4 double transform (version 2+).
            transform = np.frombuffer(mvs.read(128), dtype=np.dtype('d')).reshape(4, 4).tolist()
            data.update({'transform': transform})
        if version > 5:
            # Oriented bounding box: rotation + min/max corners (version 6+).
            rot = np.frombuffer(mvs.read(72), dtype=np.dtype('d')).reshape(3, 3).tolist()
            pt_min = np.frombuffer(mvs.read(24), dtype=np.dtype('d')).tolist()
            pt_max = np.frombuffer(mvs.read(24), dtype=np.dtype('d')).tolist()
            data.update({'obb': {'rot': rot, 'pt_min': pt_min, 'pt_max': pt_max}})
        return data

126
scripts/python/blender_decimate.py

@ -0,0 +1,126 @@
import bpy, sys, time, math
def clear_default_objects():
    """Delete Blender's default startup objects (Cube, Light, Camera) if present."""
    default_names = ("Cube", "Light", "Camera")
    for default_name in default_names:
        existing = bpy.data.objects.get(default_name)
        if existing is not None:
            bpy.data.objects.remove(existing)
def mark_sharp_edges(obj, angle=30):
    """Mark edges of `obj` sharper than `angle` degrees as sharp.

    Runs in edit mode on the whole mesh, then returns to object mode.

    Args:
        obj: mesh object to process.
        angle: sharpness threshold in degrees (converted to radians below).
    """
    bpy.context.view_layer.objects.active = obj
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_all(action='SELECT')
    # Select edges whose face angle exceeds the threshold and flag them sharp.
    bpy.ops.mesh.edges_select_sharp(sharpness=math.radians(angle))
    bpy.ops.mesh.mark_sharp()
    bpy.ops.object.mode_set(mode='OBJECT')
def planar_decimate(obj, angle_limit=35.0, ratio=0.5, protect_uv=True):
    """Planar-dissolve decimation of `obj` via a DECIMATE modifier.

    Dissolves near-coplanar geometry while delimiting at normal (and
    optionally UV) discontinuities, and protecting boundary regions via
    the "boundary_edges" vertex group. The modifier is applied in place.

    Args:
        obj: mesh object to decimate in place.
        angle_limit: maximum face angle (degrees) allowed to dissolve.
        ratio: unused; kept for backward compatibility with existing callers.
        protect_uv: when True, also stop dissolving across UV seams.
    """
    print(f"启动高级减面流程 | 模式: 平面优化+权重控制")
    # Planar-dissolve mode merges faces whose angle is below the limit.
    mod = obj.modifiers.new(name="PlanarDecimate", type='DECIMATE')
    mod.decimate_type = 'DISSOLVE'
    mod.angle_limit = math.radians(angle_limit)  # Blender expects radians
    mod.use_dissolve_boundaries = False  # keep boundary loops intact to avoid broken faces
    # Stop dissolving across normal (and optionally UV) discontinuities.
    mod.delimit = {'NORMAL', 'UV'} if protect_uv else {'NORMAL'}
    mod.use_symmetry = True
    # Boundary protection: decimate only vertices outside "boundary_edges".
    if "boundary_edges" not in obj.vertex_groups:
        obj.vertex_groups.new(name="boundary_edges")
    mod.vertex_group = "boundary_edges"
    mod.invert_vertex_group = True
    # Bake the modifier into the mesh data.
    bpy.context.view_layer.objects.active = obj
    bpy.ops.object.modifier_apply(modifier=mod.name)
    print(f"平面优化完成 | 容差角: {angle_limit}°")
def collapse_decimate(obj, ratio=0.3, protect_groups=True):
    """Collapse-mode decimation keeping `ratio` of the faces.

    When a "high_detail" vertex group exists and `protect_groups` is True,
    only geometry outside that group is reduced. The modifier is applied
    in place.
    """
    decimate = obj.modifiers.new(name="CollapseDecimate", type='DECIMATE')
    decimate.decimate_type = 'COLLAPSE'
    decimate.ratio = ratio
    # Optional detail protection: reduce only outside the "high_detail" group
    # (the group must have been painted beforehand in Blender).
    if protect_groups and "high_detail" in obj.vertex_groups:
        decimate.vertex_group = "high_detail"
        decimate.invert_vertex_group = True
        print("启用顶点组保护 | 保留高细节区域")
    decimate.use_symmetry = True
    bpy.ops.object.modifier_apply(modifier=decimate.name)
    print(f"塌陷减面完成 | 保留比例: {ratio*100}%")
def triangulate_mesh(obj, quad_method='BEAUTY', ngon_method='BEAUTY'):
    """Triangulate `obj` with a TRIANGULATE modifier and report the result."""
    print("启动三角化处理...")
    tri_mod = obj.modifiers.new(name="Triangulate", type='TRIANGULATE')
    tri_mod.quad_method = quad_method    # how quads are split
    tri_mod.ngon_method = ngon_method    # how n-gons are split
    tri_mod.min_vertices = 4             # only touch quads and larger faces
    # Apply the modifier to the mesh data.
    bpy.context.view_layer.objects.active = obj
    bpy.ops.object.modifier_apply(modifier=tri_mod.name)
    # Report how much of the mesh is now triangles.
    polygons = obj.data.polygons
    total_faces = len(polygons)
    tris = sum(len(poly.vertices) == 3 for poly in polygons)
    print(f"三角化完成 | 三角面比例: {tris/total_faces*100:.1f}% ({tris}/{total_faces})")
def optimize_geometry(obj):
    """Clean up topology: merge duplicate vertices and recalculate normals."""
    bpy.context.view_layer.objects.active = obj
    bpy.ops.object.mode_set(mode='EDIT')
    # Merge vertices closer than 1mm-scale tolerance, then unify normals.
    bpy.ops.mesh.remove_doubles(threshold=0.001)
    bpy.ops.mesh.normals_make_consistent()
    bpy.ops.object.mode_set(mode='OBJECT')
    print("拓扑清理完成 | 法线已重计算")
if __name__ == "__main__":
    # Arguments after Blender's "--" separator: input.ply [angle_limit] [output.ply]
    cli_args = sys.argv[sys.argv.index("--") + 1:]
    input_path = cli_args[0]
    if len(cli_args) > 1:
        angle_limit = float(cli_args[1])
    else:
        angle_limit = 5.0
    if len(cli_args) > 2:
        output_path = cli_args[2]
    else:
        output_path = input_path.replace(".ply", "_decimated.ply")
    # Hardware acceleration (left disabled):
    # bpy.context.scene.cycles.device = 'GPU'
    # bpy.context.preferences.system.memory_cache_limit = 4096
    clear_default_objects()
    # Import the PLY model and grab the newly selected object.
    bpy.ops.wm.ply_import(filepath=input_path)
    obj = bpy.context.selected_objects[0]
    print(f"开始减面操作:{obj.name}")
    # Decimation pipeline: planar dissolve first, then collapse,
    # then topology cleanup and triangulation.
    mark_sharp_edges(obj)
    planar_decimate(obj, angle_limit)
    collapse_decimate(obj, ratio=0.2)  # 0.1-3 works well, 0.2-3 is a good compromise
    optimize_geometry(obj)
    triangulate_mesh(obj)
    print("减面完成")
    bpy.ops.wm.ply_export(
        filepath=output_path,
        apply_modifiers=False
    )
    print(f"模型已导出至:{output_path}")

10
scripts/python/run.sh

@ -0,0 +1,10 @@
#!/bin/sh
# Build and install openMVS, then move into the Python scripts directory.
cd ~/code/openMVS/make
# Configure the build (Release, dependencies under /lib). The previous
# `cmake -D... --build ..` mixed configure-mode cache flags with build mode,
# which is invalid cmake usage; configure here and build with make below.
cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=/lib ..
make -j20 && make install
#cmake --build . -j4
#cmake --install .
cd ~/code/openMVS/scripts/python
# Example invocations kept for reference:
#InterfaceCOLMAP -i /data/datasets/scan1/output/sfm/dense -o scene.mvs --image-folder /data/datasets/scan1/output/sfm/dense/images -w "/data/datasets/scan1/output/mvs"
#TextureMesh scene_dense.mvs -m scene_dense_mesh.ply -o scene_dense_mesh_texture.mvs --decimate 0.5 -w "/data/datasets/scan1/output2"
# TextureMesh /data/datasets/66536 --image-folder /data/datasets/66536/images/ -m /data/datasets/66536/gsmodel/train/ours_15000/waterobj/final.ply -o mesh.mvs --decimate 0.1 -w "/data/datasets/66536" --virtual-face-images 3
# TextureMesh /data/datasets/work_dir/227124 --image-folder /data/datasets/work_dir/227124/images/ -m /data/datasets/work_dir/227124/gsmodel/train/ours_16000/waterobj/final.ply -o mesh.mvs --outlier-threshold 0.1 --decimate 0.05 --sharpness-weight 0.2 -w /data/datasets/work_dir/227124/gsmodel/train/ours_16000/mesh/ --virtual-face-images 3 --patch-packing-heuristic 0 --max-texture-size 0
Loading…
Cancel
Save