import yaml import oss2 import os from tqdm import tqdm # from utils.log_utils import log_execution import os from pathlib import Path import numpy as np import collections import struct import math import os import argparse from config import print_factory_type_dir from config import oss_config from config import print_data_dir from config import url_get_oss_suffix_by_orderId from general import is_use_debug_oss from general import transform_save_o3d CameraModel = collections.namedtuple( "CameraModel", ["model_id", "model_name", "num_params"] ) Camera = collections.namedtuple("Camera", ["id", "model", "width", "height", "params"]) BaseImage = collections.namedtuple( "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"] ) Point3D = collections.namedtuple( "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"] ) CAMERA_MODELS = { CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), CameraModel(model_id=1, model_name="PINHOLE", num_params=4), CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), CameraModel(model_id=3, model_name="RADIAL", num_params=5), CameraModel(model_id=4, model_name="OPENCV", num_params=8), CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), CameraModel(model_id=7, model_name="FOV", num_params=5), CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), } CAMERA_MODEL_IDS = dict( [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] ) CAMERA_MODEL_NAMES = dict( [(camera_model.model_name, camera_model) for camera_model in CAMERA_MODELS] ) def qvec2rotmat(qvec): return np.array( [ [ 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], ], [ 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], ], [ 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, ], ] ) def rotmat2qvec(R): Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat K = ( np.array( [ [Rxx - Ryy - Rzz, 0, 0, 0], [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0], [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0], [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz], ] ) / 3.0 ) eigvals, eigvecs = np.linalg.eigh(K) qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)] if qvec[0] < 0: qvec *= -1 return qvec class Image(BaseImage): def qvec2rotmat(self): return qvec2rotmat(self.qvec) def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): """Read and unpack the next bytes from a binary file. :param fid: :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. :param endian_character: Any of {@, =, <, >, !} :return: Tuple of read and unpacked values. """ data = fid.read(num_bytes) return struct.unpack(endian_character + format_char_sequence, data) def read_points3D_text(path): """ see: src/base/reconstruction.cc void Reconstruction::ReadPoints3DText(const std::string& path) void Reconstruction::WritePoints3DText(const std::string& path) """ xyzs = None rgbs = None errors = None num_points = 0 with open(path, "r") as fid: while True: line = fid.readline() if not line: break line = line.strip() if len(line) > 0 and line[0] != "#": num_points += 1 xyzs = np.empty((num_points, 3)) rgbs = np.empty((num_points, 3)) errors = np.empty((num_points, 1)) count = 0 with open(path, "r") as fid: while True: line = fid.readline() if not line: break line = line.strip() if len(line) > 0 and line[0] != "#": elems = line.split() xyz = np.array(tuple(map(float, elems[1:4]))) rgb = np.array(tuple(map(int, elems[4:7]))) error = np.array(float(elems[7])) xyzs[count] = xyz rgbs[count] = rgb errors[count] = error count += 1 return xyzs, rgbs, errors def read_points3D_binary(path_to_model_file): """ see: src/base/reconstruction.cc void Reconstruction::ReadPoints3DBinary(const std::string& path) void Reconstruction::WritePoints3DBinary(const std::string& path) """ with open(path_to_model_file, "rb") as fid: num_points = read_next_bytes(fid, 8, "Q")[0] xyzs = np.empty((num_points, 3)) rgbs = np.empty((num_points, 3)) errors = np.empty((num_points, 1)) for p_id in range(num_points): binary_point_line_properties = read_next_bytes( fid, num_bytes=43, format_char_sequence="QdddBBBd" ) xyz = np.array(binary_point_line_properties[1:4]) rgb = np.array(binary_point_line_properties[4:7]) error = np.array(binary_point_line_properties[7]) track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ 0 ] track_elems = read_next_bytes( fid, num_bytes=8 * track_length, format_char_sequence="ii" * track_length, ) xyzs[p_id] = xyz rgbs[p_id] = rgb errors[p_id] = error return xyzs, rgbs, errors def read_intrinsics_text(path): """ Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py """ cameras = {} with open(path, "r") as fid: while True: line = fid.readline() if not line: break line = line.strip() if len(line) > 0 and line[0] != "#": elems = line.split() camera_id = int(elems[0]) model = elems[1] assert ( model == "PINHOLE" ), "While the loader support other types, the rest of the code assumes PINHOLE" width = int(elems[2]) height = int(elems[3]) params = np.array(tuple(map(float, elems[4:]))) cameras[camera_id] = Camera( id=camera_id, model=model, width=width, height=height, params=params ) return cameras def read_extrinsics_binary(path_to_model_file): """ see: src/base/reconstruction.cc void Reconstruction::ReadImagesBinary(const std::string& path) void Reconstruction::WriteImagesBinary(const std::string& path) """ images = {} with open(path_to_model_file, "rb") as fid: num_reg_images = read_next_bytes(fid, 8, "Q")[0] for _ in range(num_reg_images): binary_image_properties = read_next_bytes( fid, num_bytes=64, format_char_sequence="idddddddi" ) image_id = binary_image_properties[0] qvec = np.array(binary_image_properties[1:5]) tvec = np.array(binary_image_properties[5:8]) camera_id = binary_image_properties[8] image_name = "" current_char = read_next_bytes(fid, 1, "c")[0] while current_char != b"\x00": # look for the ASCII 0 entry image_name += current_char.decode("utf-8") current_char = read_next_bytes(fid, 1, "c")[0] num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ 0 ] x_y_id_s = read_next_bytes( fid, num_bytes=24 * num_points2D, format_char_sequence="ddq" * num_points2D, ) xys = np.column_stack( [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] ) point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) images[image_id] = Image( id=image_id, qvec=qvec, tvec=tvec, camera_id=camera_id, name=image_name, xys=xys, point3D_ids=point3D_ids, ) return images def read_intrinsics_binary(path_to_model_file): """ see: src/base/reconstruction.cc void Reconstruction::WriteCamerasBinary(const std::string& path) void Reconstruction::ReadCamerasBinary(const std::string& path) """ cameras = {} with open(path_to_model_file, "rb") as fid: num_cameras = read_next_bytes(fid, 8, "Q")[0] for _ in range(num_cameras): camera_properties = read_next_bytes( fid, num_bytes=24, format_char_sequence="iiQQ" ) camera_id = camera_properties[0] model_id = camera_properties[1] model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name width = camera_properties[2] height = camera_properties[3] num_params = CAMERA_MODEL_IDS[model_id].num_params params = read_next_bytes( fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params ) cameras[camera_id] = Camera( id=camera_id, model=model_name, width=width, height=height, params=np.array(params), ) assert len(cameras) == num_cameras return cameras def focal2fov(focal, pixels): return 2 * math.atan(pixels / (2 * focal)) def read_extrinsics_text(path): """ Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py """ images = {} with open(path, "r") as fid: while True: line = fid.readline() if not line: break line = line.strip() if len(line) > 0 and line[0] != "#": elems = line.split() image_id = int(elems[0]) qvec = np.array(tuple(map(float, elems[1:5]))) tvec = np.array(tuple(map(float, elems[5:8]))) camera_id = int(elems[8]) image_name = elems[9] elems = fid.readline().split() xys = np.column_stack( [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] ) point3D_ids = np.array(tuple(map(int, elems[2::3]))) images[image_id] = Image( id=image_id, qvec=qvec, tvec=tvec, camera_id=camera_id, name=image_name, xys=xys, point3D_ids=point3D_ids, ) return images def read_colmap_bin_array(path): """ Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_dense.py :param path: path to the colmap binary file. :return: nd array with the floating point values in the value """ with open(path, "rb") as fid: width, height, channels = np.genfromtxt( fid, delimiter="&", max_rows=1, usecols=(0, 1, 2), dtype=int ) fid.seek(0) num_delimiter = 0 byte = fid.read(1) while True: if byte == b"&": num_delimiter += 1 if num_delimiter >= 3: break byte = fid.read(1) array = np.fromfile(fid, np.float32) array = array.reshape((width, height, channels), order="F") return np.transpose(array, (1, 0, 2)).squeeze() def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): """Read and unpack the next bytes from a binary file. :param fid: :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. :param endian_character: Any of {@, =, <, >, !} :return: Tuple of read and unpacked values. """ data = fid.read(num_bytes) return struct.unpack(endian_character + format_char_sequence, data) def write_next_bytes(fid, data, format_char_sequence, endian_character="<"): """pack and write to a binary file. :param fid: :param data: data to send, if multiple elements are sent at the same time, they should be encapsuled either in a list or a tuple :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. should be the same length as the data list or tuple :param endian_character: Any of {@, =, <, >, !} """ if isinstance(data, (list, tuple)): bytes = struct.pack(endian_character + format_char_sequence, *data) else: bytes = struct.pack(endian_character + format_char_sequence, data) fid.write(bytes) def read_cameras_text(path): """ see: src/colmap/scene/reconstruction.cc void Reconstruction::WriteCamerasText(const std::string& path) void Reconstruction::ReadCamerasText(const std::string& path) """ cameras = {} with open(path, "r") as fid: while True: line = fid.readline() if not line: break line = line.strip() if len(line) > 0 and line[0] != "#": elems = line.split() camera_id = int(elems[0]) model = elems[1] width = int(elems[2]) height = int(elems[3]) params = np.array(tuple(map(float, elems[4:]))) cameras[camera_id] = Camera( id=camera_id, model=model, width=width, height=height, params=params, ) return cameras def read_cameras_binary(path_to_model_file): """ see: src/colmap/scene/reconstruction.cc void Reconstruction::WriteCamerasBinary(const std::string& path) void Reconstruction::ReadCamerasBinary(const std::string& path) """ cameras = {} with open(path_to_model_file, "rb") as fid: num_cameras = read_next_bytes(fid, 8, "Q")[0] for _ in range(num_cameras): camera_properties = read_next_bytes( fid, num_bytes=24, format_char_sequence="iiQQ" ) camera_id = camera_properties[0] model_id = camera_properties[1] model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name width = camera_properties[2] height = camera_properties[3] num_params = CAMERA_MODEL_IDS[model_id].num_params params = read_next_bytes( fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params, ) cameras[camera_id] = Camera( id=camera_id, model=model_name, width=width, height=height, params=np.array(params), ) assert len(cameras) == num_cameras return cameras def write_cameras_text(cameras, path): """ see: src/colmap/scene/reconstruction.cc void Reconstruction::WriteCamerasText(const std::string& path) void Reconstruction::ReadCamerasText(const std::string& path) """ HEADER = ( "# Camera list with one line of data per camera:\n" + "# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\n" + "# Number of cameras: {}\n".format(len(cameras)) ) with open(path, "w") as fid: fid.write(HEADER) for _, cam in cameras.items(): to_write = [cam.id, cam.model, cam.width, cam.height, *cam.params] line = " ".join([str(elem) for elem in to_write]) fid.write(line + "\n") def write_cameras_binary(cameras, path_to_model_file): """ see: src/colmap/scene/reconstruction.cc void Reconstruction::WriteCamerasBinary(const std::string& path) void Reconstruction::ReadCamerasBinary(const std::string& path) """ with open(path_to_model_file, "wb") as fid: write_next_bytes(fid, len(cameras), "Q") for _, cam in cameras.items(): model_id = CAMERA_MODEL_NAMES[cam.model].model_id camera_properties = [cam.id, model_id, cam.width, cam.height] write_next_bytes(fid, camera_properties, "iiQQ") for p in cam.params: write_next_bytes(fid, float(p), "d") return cameras def read_images_text(path): """ see: src/colmap/scene/reconstruction.cc void Reconstruction::ReadImagesText(const std::string& path) void Reconstruction::WriteImagesText(const std::string& path) """ images = {} with open(path, "r") as fid: while True: line = fid.readline() if not line: break line = line.strip() if len(line) > 0 and line[0] != "#": elems = line.split() image_id = int(elems[0]) qvec = np.array(tuple(map(float, elems[1:5]))) tvec = np.array(tuple(map(float, elems[5:8]))) camera_id = int(elems[8]) image_name = elems[9] elems = fid.readline().split() xys = np.column_stack( [ tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3])), ] ) point3D_ids = np.array(tuple(map(int, elems[2::3]))) images[image_id] = Image( id=image_id, qvec=qvec, tvec=tvec, camera_id=camera_id, name=image_name, xys=xys, point3D_ids=point3D_ids, ) return images def read_images_binary(path_to_model_file): """ see: src/colmap/scene/reconstruction.cc void Reconstruction::ReadImagesBinary(const std::string& path) void Reconstruction::WriteImagesBinary(const std::string& path) """ images = {} with open(path_to_model_file, "rb") as fid: num_reg_images = read_next_bytes(fid, 8, "Q")[0] for _ in range(num_reg_images): binary_image_properties = read_next_bytes( fid, num_bytes=64, format_char_sequence="idddddddi" ) image_id = binary_image_properties[0] qvec = np.array(binary_image_properties[1:5]) tvec = np.array(binary_image_properties[5:8]) camera_id = binary_image_properties[8] binary_image_name = b"" current_char = read_next_bytes(fid, 1, "c")[0] while current_char != b"\x00": # look for the ASCII 0 entry binary_image_name += current_char current_char = read_next_bytes(fid, 1, "c")[0] image_name = binary_image_name.decode("utf-8") num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ 0 ] x_y_id_s = read_next_bytes( fid, num_bytes=24 * num_points2D, format_char_sequence="ddq" * num_points2D, ) xys = np.column_stack( [ tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3])), ] ) point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) images[image_id] = Image( id=image_id, qvec=qvec, tvec=tvec, camera_id=camera_id, name=image_name, xys=xys, point3D_ids=point3D_ids, ) return images def write_images_text(images, path): """ see: src/colmap/scene/reconstruction.cc void Reconstruction::ReadImagesText(const std::string& path) void Reconstruction::WriteImagesText(const std::string& path) """ if len(images) == 0: mean_observations = 0 else: mean_observations = sum( (len(img.point3D_ids) for _, img in images.items()) ) / len(images) HEADER = ( "# Image list with two lines of data per image:\n" + "# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n" + "# POINTS2D[] as (X, Y, POINT3D_ID)\n" + "# Number of images: {}, mean observations per image: {}\n".format( len(images), mean_observations ) ) with open(path, "w") as fid: fid.write(HEADER) for _, img in images.items(): image_header = [ img.id, *img.qvec, *img.tvec, img.camera_id, img.name, ] first_line = " ".join(map(str, image_header)) fid.write(first_line + "\n") points_strings = [] for xy, point3D_id in zip(img.xys, img.point3D_ids): points_strings.append(" ".join(map(str, [*xy, point3D_id]))) fid.write(" ".join(points_strings) + "\n") def write_images_binary(images, path_to_model_file): """ see: src/colmap/scene/reconstruction.cc void Reconstruction::ReadImagesBinary(const std::string& path) void Reconstruction::WriteImagesBinary(const std::string& path) """ with open(path_to_model_file, "wb") as fid: write_next_bytes(fid, len(images), "Q") for _, img in images.items(): write_next_bytes(fid, img.id, "i") write_next_bytes(fid, img.qvec.tolist(), "dddd") write_next_bytes(fid, img.tvec.tolist(), "ddd") write_next_bytes(fid, img.camera_id, "i") for char in img.name: write_next_bytes(fid, char.encode("utf-8"), "c") write_next_bytes(fid, b"\x00", "c") write_next_bytes(fid, len(img.point3D_ids), "Q") for xy, p3d_id in zip(img.xys, img.point3D_ids): write_next_bytes(fid, [*xy, p3d_id], "ddq") def read_points3D_text(path): """ see: src/colmap/scene/reconstruction.cc void Reconstruction::ReadPoints3DText(const std::string& path) void Reconstruction::WritePoints3DText(const std::string& path) """ points3D = {} with open(path, "r") as fid: while True: line = fid.readline() if not line: break line = line.strip() if len(line) > 0 and line[0] != "#": elems = line.split() point3D_id = int(elems[0]) xyz = np.array(tuple(map(float, elems[1:4]))) rgb = np.array(tuple(map(int, elems[4:7]))) error = float(elems[7]) image_ids = np.array(tuple(map(int, elems[8::2]))) point2D_idxs = np.array(tuple(map(int, elems[9::2]))) points3D[point3D_id] = Point3D( id=point3D_id, xyz=xyz, rgb=rgb, error=error, image_ids=image_ids, point2D_idxs=point2D_idxs, ) return points3D def read_points3D_binary(path_to_model_file): """ see: src/colmap/scene/reconstruction.cc void Reconstruction::ReadPoints3DBinary(const std::string& path) void Reconstruction::WritePoints3DBinary(const std::string& path) """ points3D = {} with open(path_to_model_file, "rb") as fid: num_points = read_next_bytes(fid, 8, "Q")[0] for _ in range(num_points): binary_point_line_properties = read_next_bytes( fid, num_bytes=43, format_char_sequence="QdddBBBd" ) point3D_id = binary_point_line_properties[0] xyz = np.array(binary_point_line_properties[1:4]) rgb = np.array(binary_point_line_properties[4:7]) error = np.array(binary_point_line_properties[7]) track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ 0 ] track_elems = read_next_bytes( fid, num_bytes=8 * track_length, format_char_sequence="ii" * track_length, ) image_ids = np.array(tuple(map(int, track_elems[0::2]))) point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) points3D[point3D_id] = Point3D( id=point3D_id, xyz=xyz, rgb=rgb, error=error, image_ids=image_ids, point2D_idxs=point2D_idxs, ) return points3D def write_points3D_text(points3D, path): """ see: src/colmap/scene/reconstruction.cc void Reconstruction::ReadPoints3DText(const std::string& path) void Reconstruction::WritePoints3DText(const std::string& path) """ if len(points3D) == 0: mean_track_length = 0 else: mean_track_length = sum( (len(pt.image_ids) for _, pt in points3D.items()) ) / len(points3D) HEADER = ( "# 3D point list with one line of data per point:\n" + "# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\n" + "# Number of points: {}, mean track length: {}\n".format( len(points3D), mean_track_length ) ) with open(path, "w") as fid: fid.write(HEADER) for _, pt in points3D.items(): point_header = [pt.id, *pt.xyz, *pt.rgb, pt.error] fid.write(" ".join(map(str, point_header)) + " ") track_strings = [] for image_id, point2D in zip(pt.image_ids, pt.point2D_idxs): track_strings.append(" ".join(map(str, [image_id, point2D]))) fid.write(" ".join(track_strings) + "\n") def write_points3D_binary(points3D, path_to_model_file): """ see: src/colmap/scene/reconstruction.cc void Reconstruction::ReadPoints3DBinary(const std::string& path) void Reconstruction::WritePoints3DBinary(const std::string& path) """ with open(path_to_model_file, "wb") as fid: write_next_bytes(fid, len(points3D), "Q") for _, pt in points3D.items(): write_next_bytes(fid, pt.id, "Q") write_next_bytes(fid, pt.xyz.tolist(), "ddd") write_next_bytes(fid, pt.rgb.tolist(), "BBB") write_next_bytes(fid, pt.error, "d") track_length = pt.image_ids.shape[0] write_next_bytes(fid, track_length, "Q") for image_id, point2D_id in zip(pt.image_ids, pt.point2D_idxs): write_next_bytes(fid, [image_id, point2D_id], "ii") def detect_model_format(path, ext): if ( os.path.isfile(os.path.join(path, "cameras" + ext)) and os.path.isfile(os.path.join(path, "images" + ext)) and os.path.isfile(os.path.join(path, "points3D" + ext)) ): print("Detected model format: '" + ext + "'") return True return False def read_model(path, ext=""): # try to detect the extension automatically if ext == "": if detect_model_format(path, ".bin"): ext = ".bin" elif detect_model_format(path, ".txt"): ext = ".txt" else: print("Provide model format: '.bin' or '.txt'") return if ext == ".txt": cameras = read_cameras_text(os.path.join(path, "cameras" + ext)) images = read_images_text(os.path.join(path, "images" + ext)) points3D = read_points3D_text(os.path.join(path, "points3D") + ext) else: cameras = read_cameras_binary(os.path.join(path, "cameras" + ext)) images = read_images_binary(os.path.join(path, "images" + ext)) points3D = read_points3D_binary(os.path.join(path, "points3D") + ext) return cameras, images, points3D def write_model(cameras, images, points3D, path, ext=".bin"): if ext == ".txt": write_cameras_text(cameras, os.path.join(path, "cameras" + ext)) write_images_text(images, os.path.join(path, "images" + ext)) write_points3D_text(points3D, os.path.join(path, "points3D") + ext) else: write_cameras_binary(cameras, os.path.join(path, "cameras" + ext)) write_images_binary(images, os.path.join(path, "images" + ext)) write_points3D_binary(points3D, os.path.join(path, "points3D") + ext) return cameras, images, points3D def qvec2rotmat(qvec): return np.array( [ [ 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], ], [ 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], ], [ 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, ], ] ) def rotmat2qvec(R): Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat K = ( np.array( [ [Rxx - Ryy - Rzz, 0, 0, 0], [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0], [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0], [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz], ] ) / 3.0 ) eigvals, eigvecs = np.linalg.eigh(K) qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)] if qvec[0] < 0: qvec *= -1 return qvec def get_oss_client(cfg_path): with open(os.path.expanduser(cfg_path), "r") as config: cfg = yaml.safe_load(config) AccessKeyId_down = cfg["run"]["down"]["AccessKeyId"] AccessKeySecret_down = cfg["run"]["down"]["AccessKeySecret"] Endpoint_down = cfg["run"]["down"]["Endpoint"] Bucket_down = cfg["run"]["down"]["Bucket"] oss_client = oss2.Bucket( oss2.Auth(AccessKeyId_down, AccessKeySecret_down), Endpoint_down, Bucket_down ) return oss_client class DataTransfer: ''' 数据传输类 ''' def __init__(self, local_path: str, oss_path: str, oss_client: oss2.Bucket): ''' local_path: 本地输出路径 oss_path: oss路径 oss_client: oss客户端 ''' self.local_path = local_path self.oss_path = oss_path.lstrip('/') self.oss_client = oss_client # self.description = description # @log_execution(self.description) def download_data(self): """ 从 OSS 下载数据到本地,保持原有目录结构 """ # 列出所有对象 objects = [] prefix = self.oss_path.lstrip('/') # 移除开头的 '/' 以匹配 OSS 格式 for obj in oss2.ObjectIterator(self.oss_client, prefix=prefix): if obj.key != prefix: # 跳过目录本身 objects.append(obj.key) # 下载所有文件,添加进度条 for obj_key in tqdm(objects, desc="下载进度"): if obj_key.endswith('/'): continue if "printId" in obj_key: continue # 计算相对路径 rel_path = obj_key[len(prefix):].lstrip('/') # 构建本地完整路径 local_path = os.path.join(self.local_path, rel_path) # 创建必要的目录 os.makedirs(os.path.dirname(local_path), exist_ok=True) # 下载文件 self.oss_client.get_object_to_file(obj_key, local_path) print("download_data local_path=" + local_path) order_id: str pid: str model_height: str def download_data_rename_json(self, json_model_info): """ 从 OSS 下载数据到本地,保持原有目录结构 """ # 列出所有对象 objects = [] prefix = self.oss_path.lstrip('/') # 移除开头的 '/' 以匹配 OSS 格式 for obj in oss2.ObjectIterator(self.oss_client, prefix=prefix): if obj.key != prefix: # 跳过目录本身 objects.append(obj.key) # 下载所有文件,添加进度条 for obj_key in tqdm(objects, desc="下载进度"): if obj_key.endswith('/'): continue if "printId" in obj_key: continue # 计算相对路径 rel_path = obj_key[len(prefix):].lstrip('/') file_dir, file_name = os.path.split(rel_path) file_base, file_ext = os.path.splitext(file_name) # 根据文件后缀名进行重命名 if file_ext.lower() in ['.mtl', '.jpg', '.jpeg', '.png']: # 对于.mtl和图片文件,在原名前加order_id new_file_name = f"{json_model_info.order_id}_{file_name}" # new_file_name = file_name elif file_ext.lower() == '.obj': # 对于.obj文件,完全重命名 new_file_name = f"{json_model_info.obj_name}" else: # 其他文件类型保持原名 new_file_name = file_name print("new_file_name=", new_file_name) # 构建新的相对路径 if file_dir: # 如果有子目录 new_rel_path = os.path.join(file_dir, new_file_name) else: new_rel_path = new_file_name # 构建本地完整路径 local_path = os.path.join(self.local_path, new_rel_path) # 创建必要的目录 os.makedirs(os.path.dirname(local_path), exist_ok=True) # 下载文件 self.oss_client.get_object_to_file(obj_key, local_path) if file_ext == '.obj': # 10MB以上 try: # 使用临时文件避免内存问题 [8](@ref) temp_path = local_path + '.tmp' with open(local_path, 'r', encoding='utf-8') as f_in, \ open(temp_path, 'w', encoding='utf-8') as f_out: mtllib_modified = False for line in f_in: if not mtllib_modified and line.strip().startswith('mtllib '): parts = line.split(' ', 1) if len(parts) > 1: old_mtl_name = parts[1].strip() new_mtl_name = f"{json_model_info.order_id}_{old_mtl_name}" f_out.write(f"mtllib {new_mtl_name}\n") mtllib_modified = True continue f_out.write(line) os.replace(temp_path, local_path) # 原子性替换 except IOError as e: print(f"处理大文件 {local_path} 时出错: {e}") if os.path.exists(temp_path): os.remove(temp_path) # 优化后的.obj文件处理逻辑 if file_ext == '.mtl': try: # 使用更高效的文件读取方式 [6,8](@ref) with open(local_path, 'r', encoding='utf-8') as f: content = f.read() # 使用字符串方法直接查找和替换,避免不必要的循环 [9](@ref) lines = content.split('\n') mtllib_modified = False for i, line in enumerate(lines): stripped_line = line.strip() if not mtllib_modified and stripped_line.startswith('map_Kd '): # 更高效的分割方式 [9](@ref) parts = line.split(' ', 1) if len(parts) > 1: old_name = parts[1].strip() new_name = f"{json_model_info.order_id}_{old_name}" lines[i] = f"map_Kd {new_name}" mtllib_modified = True print(f"已更新材质库引用: {old_name} -> {new_name}") break # 找到第一个后立即退出 # 批量写入,减少I/O操作 [6](@ref) with open(local_path, 'w', encoding='utf-8') as f: f.write('\n'.join(lines)) except IOError as e: print(f"处理文件 {local_path} 时出错: {e}") except UnicodeDecodeError as e: print(f"文件编码错误 {local_path}: {e}") print(f"下载文件: {obj_key} -> {local_path}") def download_data_rename_batch(self, batch_model_info): """ 从 OSS 下载数据到本地,保持原有目录结构 """ # 列出所有对象 objects = [] prefix = self.oss_path.lstrip('/') # 移除开头的 '/' 以匹配 OSS 格式 prefix_exists = False for obj in oss2.ObjectIterator(self.oss_client, prefix=prefix): prefix_exists = True if obj.key != prefix: # 跳过目录本身 objects.append(obj.key) print(f"obj.key={obj.key}") if not prefix_exists: print(f"前缀 '{prefix}' 下没有找到任何文件或目录。") return False else: print(f"前缀 '{prefix}' 存在,共找到 {len(objects)} 个对象。") # 下载所有文件,添加进度条 for obj_key in tqdm(objects, desc="下载进度"): if obj_key.endswith('/'): print("下载 endswith('/'") continue if "printId" in obj_key: print(f"下载 in obj_key") continue # 计算相对路径 rel_path = obj_key[len(prefix):].lstrip('/') file_dir, file_name = os.path.split(rel_path) file_base, file_ext = os.path.splitext(file_name) # 根据文件后缀名进行重命名 if file_ext.lower() in ['.mtl', '.jpg', '.jpeg', '.png']: # 对于.mtl和图片文件,在原名前加order_id new_file_name = f"{batch_model_info.order_id}_{file_name}" # new_file_name = file_name elif file_ext.lower() == '.obj': # 对于.obj文件,完全重命名 new_file_name = f"{batch_model_info.order_id}_{batch_model_info.pid}_P{batch_model_info.print_order_id}_{batch_model_info.model_size}{file_ext}" else: # 其他文件类型保持原名 new_file_name = file_name # 构建新的相对路径 if file_dir: # 如果有子目录 new_rel_path = os.path.join(file_dir, new_file_name) else: new_rel_path = new_file_name # 构建本地完整路径 local_path = os.path.join(self.local_path, new_rel_path) # 创建必要的目录 os.makedirs(os.path.dirname(local_path), exist_ok=True) # 下载文件 self.oss_client.get_object_to_file(obj_key, local_path) if file_ext == '.obj': # 10MB以上 try: # 使用临时文件避免内存问题 [8](@ref) temp_path = local_path + '.tmp' with open(local_path, 'r', encoding='utf-8') as f_in, \ open(temp_path, 'w', encoding='utf-8') as f_out: mtllib_modified = False for line in f_in: if not mtllib_modified and line.strip().startswith('mtllib '): parts = line.split(' ', 1) if len(parts) > 1: old_mtl_name = parts[1].strip() new_mtl_name = f"{batch_model_info.order_id}_{old_mtl_name}" f_out.write(f"mtllib {new_mtl_name}\n") mtllib_modified = True print("len(parts) > 1") continue f_out.write(line) os.replace(temp_path, local_path) # 原子性替换 except IOError as e: print(f"处理大文件 {local_path} 时出错: {e}") if os.path.exists(temp_path): os.remove(temp_path) # 优化后的.obj文件处理逻辑 if file_ext == '.mtl': try: # 使用更高效的文件读取方式 [6,8](@ref) with open(local_path, 'r', encoding='utf-8') as f: content = f.read() # 使用字符串方法直接查找和替换,避免不必要的循环 [9](@ref) lines = content.split('\n') mtllib_modified = False for i, line in enumerate(lines): stripped_line = line.strip() if not mtllib_modified and stripped_line.startswith('map_Kd '): # 更高效的分割方式 [9](@ref) parts = line.split(' ', 1) if len(parts) > 1: old_name = parts[1].strip() new_name = f"{batch_model_info.order_id}_{old_name}" lines[i] = f"map_Kd {new_name}" mtllib_modified = True print(f"已更新材质库引用: {old_name} -> {new_name}") break # 找到第一个后立即退出 # 批量写入,减少I/O操作 [6](@ref) with open(local_path, 'w', encoding='utf-8') as f: f.write('\n'.join(lines)) except IOError as e: print(f"处理文件 {local_path} 时出错: {e}") except UnicodeDecodeError as e: print(f"文件编码错误 {local_path}: {e}") print(f"下载文件: {obj_key} -> {local_path}") return True def download_single_file(self): """ 下载单个文件从OSS到本地 """ # 确保本地目录存在 os.makedirs(os.path.dirname(self.local_path), exist_ok=True) # 直接下载文件 try: self.oss_client.get_object_to_file(self.oss_path, self.local_path) print(f"文件已下载到: {self.local_path}") except oss2.exceptions.NoSuchKey: print(f"OSS文件不存在: {self.oss_path}") def upload_data(self): ''' 上传数据到OSS ''' # 检测本地路径是否存在 if not os.path.exists(self.local_path): raise FileNotFoundError(f"本地路径不存在: {self.local_path}") # 判断本地路径是文件还是目录 if os.path.isfile(self.local_path): local_suffix = Path(self.local_path).suffix oss_suffix = Path(self.oss_path).suffix if oss_suffix and oss_suffix != local_suffix: # 后缀名不一致,上传到指定文件夹下的同名文件 oss_dir = os.path.dirname(self.oss_path) oss_target_path = os.path.join(oss_dir, os.path.basename(self.local_path)) else: # 后缀名一致,上传到指定OSS路径 oss_target_path = self.oss_path # 上传文件 self.oss_client.put_object_from_file(oss_target_path, self.local_path) print(f"文件已上传到: {oss_target_path}") elif os.path.isdir(self.local_path): oss_suffix = Path(self.oss_path).suffix if oss_suffix: raise ValueError("不能将目录上传到具有后缀名的OSS路径。") # 遍历本地目录并上传 for root, dirs, files in os.walk(self.local_path): for file in files: local_file_path = os.path.join(root, file) relative_path = os.path.relpath(local_file_path, self.local_path) oss_file_path = os.path.join(self.oss_path, relative_path).replace("\\", "/") # 创建必要的目录 oss_dir = os.path.dirname(oss_file_path) # 上传文件 self.oss_client.put_object_from_file(oss_file_path, local_file_path) print(f"文件已上传到: {oss_file_path}") else: raise ValueError(f"无效的本地路径类型: {self.local_path}") import requests import json import shutil def get_api(url): try: response = requests.get(url) response.raise_for_status() # 检查请求是否成功 response = json.loads(response.text) if response.get("code") != 1000: raise Exception(f"Error fetching URL {url}: {response.get('message')}") else: return response except requests.exceptions.RequestException as e: raise Exception(f"Error fetching URL {url}: {e}") from dataclasses import dataclass @dataclass class JSONModelInfo: obj_name: str order_id: str pid: str model_height: str def read_pids_from_json(pid_file): """从文件读取所有PID""" # with open(pid_file, 'r') as f: # # 过滤掉空行并去除每行首尾的空白字符 # return [line.strip() for line in f if line.strip()] json_path = pid_file """ 加载JSON文件,读取所有模型信息,应用变换后返回模型列表 """ # 检查JSON文件是否存在 if not os.path.exists(json_path): print(f"错误: JSON文件不存在 - {json_path}") return [] # 读取JSON文件 try: with open(json_path, 'r') as f: data = json.load(f) except Exception as e: print(f"读取JSON文件失败: {e}") return [] list_model_info = [] # 处理每个模型 for model in data.get('models', []): obj_name = model.get('file_name', '') parts = obj_name.split('_') order_id = parts[0] pid = parts[1] model_height = parts[3] model_info = JSONModelInfo( obj_name=obj_name, order_id=order_id, pid=pid, model_height=model_height ) list_model_info.append(model_info) return list_model_info, data def download_data_by_json(model_info, workdir, oss_client ): try: pid = model_info.pid model_height = model_info.model_height target_dir = f"{workdir}" url = f"{url_get_oss_suffix_by_orderId}{model_info.order_id}" res = requests.get(url) data = res.json()["data"] # print("datas=",data) data = data.replace("/init_obj", "") print("target_dir=", target_dir) # download_textures = DataTransfer(target_dir, f"objs/download/print/{pid}/base/model/{model_height}/", oss_client) # download_textures = DataTransfer(target_dir, f"objs/download/print/{pid}/base_cartoon/badge/101/3/{model_height}/", oss_client) download_textures = DataTransfer(target_dir, f"objs/download/print/{pid}/{data}/{model_height}/", oss_client) download_textures.download_data_rename_json(model_info) # 下载后检查目标文件夹是否为空 if os.path.exists(target_dir) and not os.listdir(target_dir): shutil.rmtree(target_dir) print(f"下载后检查发现目标文件夹为空,已删除: {target_dir}") except Exception as e: print(f"卡通图片下载失败: {pid}, 错误: {str(e)}") pass @dataclass class BatchModelInfo: order_id: str pid: str print_order_id: str model_size: str path: str count: str def read_paths_from_batch(batch_id): url = f"https://mp.api.suwa3d.com/api/printOrder/getInfoByPrintBatchId?batch_id={batch_id}" res = requests.get(url) datas = res.json()["data"] print("datas=",datas) list_print_model_info = [] for data in datas: batch_model_info = BatchModelInfo( order_id=data["order_id"], pid=data["pid"], print_order_id=data["print_order_id"], model_size=data["model_size"], path=data["path"], count=data["quantity"] ) list_print_model_info.append(batch_model_info) return list_print_model_info, datas def download_data_by_batch(batch_model_info, workdir, oss_client ): try: target_dir = f"{workdir}" print("target_dir=", target_dir) path = batch_model_info.path download_textures = DataTransfer(target_dir, f"{path}/", oss_client) if not download_textures.download_data_rename_batch(batch_model_info): print("fail download_data_rename_batch") return False # 下载后检查目标文件夹是否为空 if os.path.exists(target_dir) and not os.listdir(target_dir): shutil.rmtree(target_dir) print(f"下载后检查发现目标文件夹为空,已删除: {target_dir}") except Exception as e: print(f"下载失败: {path}, 错误: {str(e)}") pass return True def download_datas_by_batch(batch_id, workdir, oss_config): oss_client = get_oss_client(oss_config) # 读取所有path list_print_model_info, datas = read_paths_from_batch(batch_id) print(f"从文件读取了 {len(list_print_model_info)} 个path") # 批量下载 for batch_model_info in list_print_model_info: print(f"开始下载print_model_info: {batch_model_info}") if not download_data_by_batch(batch_model_info, workdir, oss_client): return datas, False return datas, True def download_datas_by_pre_layout(list_print_model_info, workdir, oss_config): oss_client = get_oss_client(oss_config) print(f"从文件读取了 {len(list_print_model_info)} 个path") # 批量下载 for batch_model_info in list_print_model_info: print(f"开始下载print_model_info: {batch_model_info}") if not download_data_by_batch(batch_model_info, workdir, oss_client): return False return True def download_transform_save_by_batch(batch_id, workdir, oss_config): datas, succ = download_datas_by_batch(batch_id, workdir, oss_config) print("datas=", datas) layout_data = datas["layout_data"] original_obj_pid_dir = workdir transform_save_o3d(layout_data, original_obj_pid_dir) def download_datas_by_json(pid_file, workdir, oss_config): oss_client = get_oss_client(oss_config) #json_path = os.path.join(workdir, "3DPrintLayout.json") json_path = os.path.join(workdir, f"{pid_file}.json") # 读取所有PID list_model_info, data = read_pids_from_json(json_path) print(f"从文件读取了 {len(list_model_info)} 个PID") # 批量下载 for model_info in list_model_info: print(f"开始下载PID: {model_info}") download_data_by_json(model_info, args.workdir, oss_client) return data def download_transform_save_by_json(pid_file, workdir, oss_config): layout_data = download_datas_by_json(pid_file, workdir, oss_config) original_obj_pid_dir = workdir transform_save_o3d(layout_data, original_obj_pid_dir) def upload_result(base_original_obj_dir, oss_config, batch_id): oss_client = get_oss_client(oss_config) try: target_dir = f"{base_original_obj_dir}" oss_batch_dir = "batchPrint" print(f"is_use_debug_oss={is_use_debug_oss()}") if is_use_debug_oss(): oss_batch_dir = "batchPrint/debug_hsc" print(f"target_dir={target_dir}, batch_id={batch_id}") data_transfer = DataTransfer(f"{target_dir}/{batch_id}.json", f"{oss_batch_dir}/{batch_id}/{batch_id}.json", oss_client) data_transfer.upload_data() data_transfer = DataTransfer(f"{target_dir}/{batch_id}.jpg", f"{oss_batch_dir}/{batch_id}/{batch_id}.jpg", oss_client) data_transfer.upload_data() except Exception as e: print(f"失败: {batch_id}, 错误: {str(e)}") pass import open3d as o3d if __name__ == "__main__": parser = argparse.ArgumentParser() is_by_batch = True is_transform_save = False if is_by_batch: # 通过批次下载 """ parser.add_argument("--batch_id", type=str, required=True, help="batch_id") parser.add_argument("--workdir", type=str, required=True) parser.add_argument("--oss_config", type=str, required=True) args = parser.parse_args() """ # batch_id = args.batch_id batch_id = 10118 # workdir = args.workdir workdir = f"{print_data_dir}{batch_id}" # oss_config = args.oss_config if is_transform_save: download_transform_save_by_batch(batch_id, workdir, oss_config) else: download_datas_by_batch(batch_id, workdir, oss_config) """ oss_client = get_oss_client(args.oss_config) # 读取所有path list_print_model_info = read_paths_from_batch(args.batch_id) print(f"从文件读取了 {len(list_print_model_info)} 个path") # 批量下载 for batch_model_info in list_print_model_info: print(f"开始下载print_model_info: {batch_model_info}") download_data_by_batch(batch_model_info, args.workdir, oss_client) """ else: # 通过Json下载 parser.add_argument("--batch_id", type=str, required=True, help="包含PID列表的json文件路径") parser.add_argument("--workdir", type=str, required=True) parser.add_argument("--oss_config", type=str, required=True) args = parser.parse_args() if is_transform_save: download_transform_save_by_json(args.batch_id, args.workdir, args.oss_config) else: download_datas_by_json(args.batch_id, args.workdir, args.oss_config) """ oss_client = get_oss_client(args.oss_config) pid_file = os.path.join(args.workdir, "3DPrintLayout.json") print("pid_file=", pid_file) # 读取所有PID list_model_info = read_pids_from_json(pid_file) print(f"从文件读取了 {len(list_model_info)} 个PID") # 批量下载 for model_info in list_model_info: print(f"开始下载PID: {model_info}") download_data_by_json(model_info, args.workdir, oss_client) """