From daf8667065d1a857cce7533e897d7334da23d167 Mon Sep 17 00:00:00 2001 From: hesuicong Date: Sat, 29 Nov 2025 16:18:11 +0800 Subject: [PATCH] =?UTF-8?q?=E5=88=9D=E5=A7=8B=E5=8C=96=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- clound_print.py | 346 ++ download_print.py | 1569 +++++++++ download_print/run.yaml | 6 + download_print_out.py | 354 ++ get_lowest_position_of_center_ext.py | 847 +++++ get_lowest_position_of_z_out.py | 352 ++ grid_near_three.py | 179 + print.sh | 6 + print_factory_type_setting_obj_run.py | 291 ++ print_factory_type_setting_obj_run_GUI.py | 193 ++ print_merged_many_obj.py | 162 + print_mplot3d_point_cloud_layout.py | 3619 +++++++++++++++++++++ print_setting_run.py | 127 + print_setting_ui.py | 51 + print_show_weight_max_obj.py | 1120 +++++++ print_type_setting_gui.py | 346 ++ print_type_setting_gui.spec | 38 + print_type_setting_gui_multi.py | 126 + qt5_demo.py | 25 + qt5_demo.spec | 44 + sui_01.py | 275 ++ test.py | 19 + test_load_json.py | 482 +++ x_y_min_test.py | 52 + 读写时间测试.py | 152 + 读写时间测试2.py | 98 + 26 files changed, 10879 insertions(+) create mode 100644 clound_print.py create mode 100644 download_print.py create mode 100755 download_print/run.yaml create mode 100644 download_print_out.py create mode 100644 get_lowest_position_of_center_ext.py create mode 100644 get_lowest_position_of_z_out.py create mode 100644 grid_near_three.py create mode 100755 print.sh create mode 100644 print_factory_type_setting_obj_run.py create mode 100644 print_factory_type_setting_obj_run_GUI.py create mode 100644 print_merged_many_obj.py create mode 100644 print_mplot3d_point_cloud_layout.py create mode 100644 print_setting_run.py create mode 100644 print_setting_ui.py create mode 100644 print_show_weight_max_obj.py create mode 100644 print_type_setting_gui.py create mode 100644 print_type_setting_gui.spec create mode 100644 print_type_setting_gui_multi.py create mode 100644 qt5_demo.py create mode 100644 qt5_demo.spec create mode 100644 sui_01.py create mode 100644 test.py create mode 100644 test_load_json.py create mode 100644 x_y_min_test.py create mode 100644 读写时间测试.py create mode 100644 读写时间测试2.py diff --git a/clound_print.py b/clound_print.py new file mode 100644 index 0000000..f2bb70d --- /dev/null +++ b/clound_print.py @@ -0,0 +1,346 @@ +import os +import subprocess +import redis +import logging +from typing import cast +import requests +from download_print import download_datas_by_pre_layout +from download_print import BatchModelInfo +from download_print import is_test +from print_factory_type_setting_obj_run import print_type_setting_obj + + +# 如果没有 DISPLAY,自动启动 Xvfb +if "DISPLAY" not in os.environ: + import atexit + from time import sleep + + # 设置临时 XDG_RUNTIME_DIR + runtime_dir = f"/tmp/runtime-{os.getuid()}" + os.makedirs(runtime_dir, exist_ok=True) + os.environ["XDG_RUNTIME_DIR"] = runtime_dir + + # 启动 Xvfb + xvfb_cmd = ["Xvfb", ":99", "-screen", "0", "1024x768x24", "-nolisten", "tcp"] + xvfb_proc = subprocess.Popen(xvfb_cmd) + atexit.register(lambda: xvfb_proc.terminate()) # 退出时关闭 Xvfb + + # 设置 DISPLAY + os.environ["DISPLAY"] = ":99" + sleep(0.5) # 等待 Xvfb 启动 + +# redis_config = { +# "host": "127.0.0.1", +# "port": 6379, +# "db": 6, +# "password": "", # "kcV2000", +# "socket_timeout": 10, +# } + +redis_config = { + "host": "mp.api.suwa3d.com", + "port": 6379, + "db": 6, + "password": "kcV2000", # "kcV2000", + "socket_timeout": 10, +} + +import sys +class RedisTaskQueue: + """封装所有与 Redis 
任务队列相关的操作。""" + def __init__(self, queue_key: str, redis_config: dict = redis_config): + self.queue_key = queue_key + self.client = redis.Redis(**redis_config) + + def get_length(self) -> int: + """ + 获取队列的长度。如果连接丢失,会尝试重新连接一次。 + """ + + try:# 检测queue_key是否存在 + length = 0 + self.client = redis.Redis(**redis_config) + if not self.client.exists(self.queue_key): + logging.warning(f"队列不存在: {self.queue_key}") + return 0 + + key_type = self.client.type(self.queue_key).decode('utf-8') # type: ignore + if key_type == 'list': + length = cast(int, self.client.llen(self.queue_key)) + elif key_type == 'set': + length = cast(int, self.client.scard(self.queue_key)) + return length + except redis.ConnectionError as e: + logging.warning(f"与 Redis 的连接中断: {e}。正在尝试重新连接...") + return 0 + except Exception as e: + logging.warning(f"获取队列长度失败: {e}") + return 0 + + def get_info(self) -> dict: + """ + 从指定的队列中取出一条信息 + + Returns: + dict: 取出的信息,如果队列为空或出错则返回空字典 + """ + try: + if not hasattr(self, 'client') or self.client is None: + self.client = redis.Redis(**redis_config) + + if not self.client.exists(self.queue_key): + logging.warning(f"队列不存在: {self.queue_key}") + return {} + + key_type = self.client.type(self.queue_key).decode('utf-8') + + if key_type == 'list': + # 从列表左侧弹出一个元素 + data = self.client.lpop(self.queue_key) + elif key_type == 'set': + # 从集合中随机弹出一个元素 + data = self.client.spop(self.queue_key) + else: + logging.warning(f"不支持的队列类型: {key_type}") + return {} + + if data is None: + return {} + + # print("data=", data) + + # 假设存储的是 JSON 格式的字符串 + try: + import json + return json.loads(data.decode('utf-8')) + except (json.JSONDecodeError, UnicodeDecodeError): + # 如果不是 JSON,返回原始字符串作为值 + return {"data": data.decode('utf-8')} + + except redis.ConnectionError as e: + logging.warning(f"Redis 连接中断: {e}") + return {} + except Exception as e: + logging.warning(f"获取队列信息失败: {e}") + return {} + + def __len__(self) -> int: + """让我们可以对实例使用 len() 函数,更符合 Python 风格。""" + return self.get_length() + +import time +import gc +def main(): + redis_queue_name = "pb:print_order_type_setting" + # while True: + try: + redis_queue = RedisTaskQueue(redis_queue_name) + task_num = redis_queue.get_length() + + # print("task_num=", task_num) + + if task_num <= 0: + time.sleep(10) + sys.exit(0) + return + + info = redis_queue.get_info() + + print("info=", info) + + process_clound_print(info) + + gc.collect() + + except Exception as e: + print(f"处理任务时出错: {e}") + + time.sleep(2.5) + + sys.exit(0) + +def test_main(): + # while True: + # data= {'machine_print_counts': 150, 'pre_batch_id': 9910032, 'print_machine_id': 14, 'print_orders': [{'counts': 1, 'layout_z': 35.62070959151145, 'model_weight': 41.862, 'order_id': 917900, 'pid': 376666, 'print_id': 126318}, {'counts': 1, 'layout_z': 31.380497732946978, 'model_weight': 35.158, 'order_id': 917810, 'pid': 376562, 'print_id': 126413}, {'counts': 1, 'layout_z': 26.136854969886777, 'model_weight': 21.403, 'order_id': 917820, 'pid': 376572, 'print_id': 126414}, {'counts': 1, 'layout_z': 37.967177899738104, 'model_weight': 30.48, 'order_id': 917922, 'pid': 376684, 'print_id': 126415}, {'counts': 1, 'layout_z': 20.368887098446734, 'model_weight': 13.097, 'order_id': 918228, 'pid': 377198, 'print_id': 126417}, {'counts': 1, 'layout_z': 21.4227336927951, 'model_weight': 15.859, 'order_id': 917576, 'pid': 376345, 'print_id': 126769}, {'counts': 1, 'layout_z': 27.539947066784066, 'model_weight': 25.185, 'order_id': 917811, 'pid': 376564, 'print_id': 126875}, {'counts': 1, 'layout_z': 31.692723822970393, 
'model_weight': 32.796, 'order_id': 917821, 'pid': 376592, 'print_id': 126876}, {'counts': 1, 'layout_z': 20.20031720146163, 'model_weight': 8.664, 'order_id': 917822, 'pid': 376602, 'print_id': 126877}, {'counts': 1, 'layout_z': 22.41680629925537, 'model_weight': 15.206, 'order_id': 917827, 'pid': 376616, 'print_id': 126878}, {'counts': 1, 'layout_z': 25.443981671314514, 'model_weight': 11.965, 'order_id': 917879, 'pid': 376635, 'print_id': 126879}, {'counts': 1, 'layout_z': 31.09222672704002, 'model_weight': 20.318, 'order_id': 917902, 'pid': 376667, 'print_id': 126882}, {'counts': 1, 'layout_z': 30.3317820592175, 'model_weight': 32.964, 'order_id': 917903, 'pid': 376668, 'print_id': 126884}, {'counts': 1, 'layout_z': 28.69639626624317, 'model_weight': 27.751, 'order_id': 917904, 'pid': 376669, 'print_id': 126885}, {'counts': 1, 'layout_z': 28.043180029163203, 'model_weight': 19.866, 'order_id': 917917, 'pid': 376677, 'print_id': 126886}, {'counts': 1, 'layout_z': 28.58783468947201, 'model_weight': 25.505, 'order_id': 918196, 'pid': 377126, 'print_id': 126888}, {'counts': 1, 'layout_z': 43.55871899466554, 'model_weight': 63.512, 'order_id': 918201, 'pid': 377131, 'print_id': 126890}, {'counts': 1, 'layout_z': 32.52479624889478, 'model_weight': 22.943, 'order_id': 918203, 'pid': 377133, 'print_id': 126891}, {'counts': 1, 'layout_z': 33.21834123013511, 'model_weight': 42.163, 'order_id': 918206, 'pid': 377139, 'print_id': 126892}, {'counts': 1, 'layout_z': 27.81674335447885, 'model_weight': 15.455, 'order_id': 918207, 'pid': 377141, 'print_id': 126893}, {'counts': 1, 'layout_z': 29.78203639784899, 'model_weight': 29.696, 'order_id': 918231, 'pid': 377201, 'print_id': 126894}, {'counts': 1, 'layout_z': 23.66875736859606, 'model_weight': 13.39, 'order_id': 915637, 'pid': 373135, 'print_id': 126895}, {'counts': 1, 'layout_z': 29.00408914521088, 'model_weight': 19.553, 'order_id': 918246, 'pid': 377209, 'print_id': 126897}, {'counts': 1, 'layout_z': 24.44397251338635, 'model_weight': 15.424, 'order_id': 918325, 'pid': 376974, 'print_id': 126899}, {'counts': 1, 'layout_z': 31.755406379699707, 'model_weight': 31.131, 'order_id': 918492, 'pid': 377584, 'print_id': 126900}, {'counts': 1, 'layout_z': 38.03699946264237, 'model_weight': 47.412, 'order_id': 918493, 'pid': 377587, 'print_id': 126901}, {'counts': 1, 'layout_z': 43.52919799223749, 'model_weight': 37.842, 'order_id': 918494, 'pid': 377589, 'print_id': 126902}, {'counts': 1, 'layout_z': 25.906302369108843, 'model_weight': 18.64, 'order_id': 918495, 'pid': 377590, 'print_id': 126903}, {'counts': 1, 'layout_z': 36.02611978829816, 'model_weight': 43.814, 'order_id': 918496, 'pid': 377591, 'print_id': 126904}, {'counts': 1, 'layout_z': 26.020689766262052, 'model_weight': 18.513, 'order_id': 918497, 'pid': 377594, 'print_id': 126905}, {'counts': 1, 'layout_z': 23.988078117370605, 'model_weight': 15.753, 'order_id': 918498, 'pid': 377596, 'print_id': 126906}, {'counts': 1, 'layout_z': 27.457814063914604, 'model_weight': 23.507, 'order_id': 918500, 'pid': 377601, 'print_id': 126908}, {'counts': 1, 'layout_z': 29.306090320672993, 'model_weight': 25.043, 'order_id': 918501, 'pid': 377605, 'print_id': 126909}, {'counts': 1, 'layout_z': 29.070231478924583, 'model_weight': 25.598, 'order_id': 918504, 'pid': 377606, 'print_id': 126910}, {'counts': 1, 'layout_z': 24.314504768436095, 'model_weight': 18.68, 'order_id': 918555, 'pid': 377684, 'print_id': 126911}, {'counts': 1, 'layout_z': 37.52979171030315, 'model_weight': 53.541, 'order_id': 918558, 
'pid': 377689, 'print_id': 126913}, {'counts': 1, 'layout_z': 26.530791960490333, 'model_weight': 15.082, 'order_id': 918560, 'pid': 377702, 'print_id': 126915}, {'counts': 1, 'layout_z': 23.604568481445312, 'model_weight': 21.614, 'order_id': 918583, 'pid': 377748, 'print_id': 126917}, {'counts': 1, 'layout_z': 27.865868478825355, 'model_weight': 22.6, 'order_id': 918636, 'pid': 377819, 'print_id': 126919}, {'counts': 1, 'layout_z': 30.786012113461286, 'model_weight': 25.699, 'order_id': 919105, 'pid': 378621, 'print_id': 126926}, {'counts': 1, 'layout_z': 27.39871813917613, 'model_weight': 20.061, 'order_id': 919106, 'pid': 378638, 'print_id': 126927}, {'counts': 1, 'layout_z': 37.645312213517265, 'model_weight': 42.591, 'order_id': 919018, 'pid': 377979, 'print_id': 126929}, {'counts': 1, 'layout_z': 53.26858516168712, 'model_weight': 130.558, 'order_id': 919110, 'pid': 378650, 'print_id': 126930}, {'counts': 1, 'layout_z': 27.335996763557635, 'model_weight': 19.724, 'order_id': 919024, 'pid': 378409, 'print_id': 126935}, {'counts': 1, 'layout_z': 23.94061705626402, 'model_weight': 15.026, 'order_id': 917479, 'pid': 376087, 'print_id': 126940}, {'counts': 1, 'layout_z': 29.543430158218627, 'model_weight': 19.191, 'order_id': 919012, 'pid': 378375, 'print_id': 126943}, {'counts': 1, 'layout_z': 27.454958866692724, 'model_weight': 22.776, 'order_id': 918499, 'pid': 377599, 'print_id': 126946}, {'counts': 1, 'layout_z': 29.367137022488468, 'model_weight': 22.455, 'order_id': 918559, 'pid': 377700, 'print_id': 126947}, {'counts': 1, 'layout_z': 29.209802512215997, 'model_weight': 22.524, 'order_id': 918569, 'pid': 377740, 'print_id': 126948}, {'counts': 1, 'layout_z': 25.396360323579223, 'model_weight': 20.442, 'order_id': 919228, 'pid': 378700, 'print_id': 126949}, {'counts': 1, 'layout_z': 28.232575120695085, 'model_weight': 24.391, 'order_id': 919229, 'pid': 378701, 'print_id': 126950}, {'counts': 1, 'layout_z': 31.82000351720909, 'model_weight': 35.189, 'order_id': 918580, 'pid': 377743, 'print_id': 126951}, {'counts': 1, 'layout_z': 18.000296592712402, 'model_weight': 5.264, 'order_id': 919230, 'pid': 378702, 'print_id': 126952}, {'counts': 1, 'layout_z': 32.6660716900792, 'model_weight': 21.182, 'order_id': 918581, 'pid': 377744, 'print_id': 126953}, {'counts': 1, 'layout_z': 20.317776580664418, 'model_weight': 6.793, 'order_id': 919231, 'pid': 378703, 'print_id': 126954}, {'counts': 1, 'layout_z': 23.08764836740975, 'model_weight': 12.265, 'order_id': 918582, 'pid': 377745, 'print_id': 126955}, {'counts': 1, 'layout_z': 22.843233481777308, 'model_weight': 12.229, 'order_id': 918783, 'pid': 378073, 'print_id': 126956}, {'counts': 1, 'layout_z': 17.441239242068544, 'model_weight': 4.888, 'order_id': 919233, 'pid': 378705, 'print_id': 126957}, {'counts': 1, 'layout_z': 18.11011046132721, 'model_weight': 5.835, 'order_id': 919234, 'pid': 378706, 'print_id': 126958}, {'counts': 1, 'layout_z': 16.299313366344762, 'model_weight': 4.977, 'order_id': 919232, 'pid': 378704, 'print_id': 126959}, {'counts': 1, 'layout_z': 16.977858537144982, 'model_weight': 4.423, 'order_id': 919235, 'pid': 378707, 'print_id': 126960}, {'counts': 1, 'layout_z': 19.66670311054518, 'model_weight': 6.836, 'order_id': 919236, 'pid': 378708, 'print_id': 126962}, {'counts': 1, 'layout_z': 16.450413533497088, 'model_weight': 4.404, 'order_id': 919237, 'pid': 378709, 'print_id': 126963}, {'counts': 1, 'layout_z': 19.12714231472859, 'model_weight': 6.788, 'order_id': 919238, 'pid': 378711, 'print_id': 126965}, {'counts': 
1, 'layout_z': 15.662716096748905, 'model_weight': 5.399, 'order_id': 919239, 'pid': 378712, 'print_id': 126966}, {'counts': 1, 'layout_z': 17.984565068326873, 'model_weight': 5.224, 'order_id': 919240, 'pid': 378713, 'print_id': 126968}, {'counts': 1, 'layout_z': 18.779169416653758, 'model_weight': 6.097, 'order_id': 919243, 'pid': 378715, 'print_id': 126971}, {'counts': 1, 'layout_z': 17.88619578087645, 'model_weight': 6.155, 'order_id': 919244, 'pid': 378716, 'print_id': 126972}, {'counts': 1, 'layout_z': 18.061075125597053, 'model_weight': 6.012, 'order_id': 919245, 'pid': 378717, 'print_id': 126973}, {'counts': 1, 'layout_z': 18.02680370824573, 'model_weight': 5.248, 'order_id': 919246, 'pid': 378718, 'print_id': 126974}, {'counts': 1, 'layout_z': 18.23313922758454, 'model_weight': 4.889, 'order_id': 919247, 'pid': 378719, 'print_id': 126975}, {'counts': 1, 'layout_z': 15.10249208974721, 'model_weight': 4.305, 'order_id': 919248, 'pid': 378720, 'print_id': 126976}, {'counts': 1, 'layout_z': 26.393060472696845, 'model_weight': 18.787, 'order_id': 919249, 'pid': 378721, 'print_id': 126978}, {'counts': 1, 'layout_z': 30.27226940032898, 'model_weight': 20.279, 'order_id': 919250, 'pid': 378722, 'print_id': 126979}, {'counts': 1, 'layout_z': 33.275894165039055, 'model_weight': 30.53, 'order_id': 919026, 'pid': 378415, 'print_id': 126980}, {'counts': 1, 'layout_z': 19.061190510799264, 'model_weight': 7.443, 'order_id': 919251, 'pid': 378723, 'print_id': 126981}, {'counts': 1, 'layout_z': 22.21409211773961, 'model_weight': 7.147, 'order_id': 919252, 'pid': 378724, 'print_id': 126982}, {'counts': 1, 'layout_z': 31.450466048829572, 'model_weight': 30.067, 'order_id': 919030, 'pid': 378420, 'print_id': 126983}, {'counts': 1, 'layout_z': 16.95446233116205, 'model_weight': 5.067, 'order_id': 919253, 'pid': 378725, 'print_id': 126984}, {'counts': 1, 'layout_z': 26.16507487130793, 'model_weight': 18.558, 'order_id': 919031, 'pid': 378425, 'print_id': 126985}, {'counts': 1, 'layout_z': 16.012466453733403, 'model_weight': 6.216, 'order_id': 919255, 'pid': 378727, 'print_id': 126987}, {'counts': 1, 'layout_z': 20.30786979564451, 'model_weight': 7.149, 'order_id': 919256, 'pid': 378728, 'print_id': 126989}, {'counts': 1, 'layout_z': 21.65708123870792, 'model_weight': 8.508, 'order_id': 919257, 'pid': 378729, 'print_id': 126990}, {'counts': 1, 'layout_z': 17.955288402689746, 'model_weight': 6.014, 'order_id': 919258, 'pid': 378730, 'print_id': 126991}, {'counts': 1, 'layout_z': 19.3577859181912, 'model_weight': 7.269, 'order_id': 919259, 'pid': 378731, 'print_id': 126992}, {'counts': 1, 'layout_z': 16.81846154205583, 'model_weight': 4.525, 'order_id': 919260, 'pid': 378732, 'print_id': 126993}, {'counts': 1, 'layout_z': 27.407528874290133, 'model_weight': 20.443, 'order_id': 919261, 'pid': 378733, 'print_id': 126994}, {'counts': 1, 'layout_z': 34.374910104166425, 'model_weight': 24.786, 'order_id': 919263, 'pid': 378737, 'print_id': 126995}, {'counts': 1, 'layout_z': 22.392321514992663, 'model_weight': 13.752, 'order_id': 919264, 'pid': 378738, 'print_id': 126996}, {'counts': 1, 'layout_z': 18.011880302127423, 'model_weight': 5.473, 'order_id': 919265, 'pid': 378739, 'print_id': 126997}, {'counts': 1, 'layout_z': 28.383317524486593, 'model_weight': 21.997, 'order_id': 919266, 'pid': 378743, 'print_id': 126998}, {'counts': 1, 'layout_z': 23.672918816217553, 'model_weight': 16.46, 'order_id': 917480, 'pid': 376067, 'print_id': 126999}, {'counts': 1, 'layout_z': 16.479959151650828, 'model_weight': 5.821, 
'order_id': 919267, 'pid': 378744, 'print_id': 127000}, {'counts': 1, 'layout_z': 35.155273295967994, 'model_weight': 48.551, 'order_id': 919268, 'pid': 378745, 'print_id': 127001}, {'counts': 1, 'layout_z': 25.231643428560584, 'model_weight': 19.358, 'order_id': 917478, 'pid': 376098, 'print_id': 127002}, {'counts': 1, 'layout_z': 16.688496289759275, 'model_weight': 5.743, 'order_id': 919269, 'pid': 378746, 'print_id': 127003}, {'counts': 1, 'layout_z': 27.167236561321484, 'model_weight': 21.521, 'order_id': 919270, 'pid': 378747, 'print_id': 127004}, {'counts': 1, 'layout_z': 25.463308095650035, 'model_weight': 10.912, 'order_id': 919271, 'pid': 378748, 'print_id': 127005}, {'counts': 1, 'layout_z': 25.206470863711544, 'model_weight': 10.581, 'order_id': 919272, 'pid': 378749, 'print_id': 127007}, {'counts': 1, 'layout_z': 26.25579441073758, 'model_weight': 13.336, 'order_id': 919273, 'pid': 378750, 'print_id': 127008}, {'counts': 1, 'layout_z': 22.231088289345216, 'model_weight': 12.438, 'order_id': 919014, 'pid': 378382, 'print_id': 127009}, {'counts': 1, 'layout_z': 26.20472244790368, 'model_weight': 14.247, 'order_id': 919274, 'pid': 378751, 'print_id': 127010}, {'counts': 1, 'layout_z': 25.164680222473894, 'model_weight': 15.369, 'order_id': 918502, 'pid': 377099, 'print_id': 127015}, {'counts': 1, 'layout_z': 24.15820278087022, 'model_weight': 16.648, 'order_id': 919219, 'pid': 378697, 'print_id': 127017}, {'counts': 1, 'layout_z': 16.35217992064665, 'model_weight': 5.306, 'order_id': 919182, 'pid': 378518, 'print_id': 127018}, {'counts': 1, 'layout_z': 21.25218355838687, 'model_weight': 10.043, 'order_id': 918230, 'pid': 377200, 'print_id': 127019}, {'counts': 1, 'layout_z': 33.729734231090276, 'model_weight': 49.911, 'order_id': 919276, 'pid': 378776, 'print_id': 127020}, {'counts': 1, 'layout_z': 24.560107711202825, 'model_weight': 8.349, 'order_id': 919282, 'pid': 378798, 'print_id': 127027}, {'counts': 1, 'layout_z': 21.12561534577449, 'model_weight': 7.557, 'order_id': 919287, 'pid': 378803, 'print_id': 127031}, {'counts': 1, 'layout_z': 25.091119425246845, 'model_weight': 14.908, 'order_id': 919292, 'pid': 378808, 'print_id': 127037}, {'counts': 1, 'layout_z': 26.77140074489941, 'model_weight': 21.134, 'order_id': 919293, 'pid': 378809, 'print_id': 127038}, {'counts': 1, 'layout_z': 27.517811122519632, 'model_weight': 20.529, 'order_id': 919294, 'pid': 378810, 'print_id': 127040}, {'counts': 1, 'layout_z': 30.62898230748005, 'model_weight': 22.053, 'order_id': 918507, 'pid': 377607, 'print_id': 127041}, {'counts': 1, 'layout_z': 27.056545744705435, 'model_weight': 20.734, 'order_id': 919295, 'pid': 378811, 'print_id': 127042}, {'counts': 1, 'layout_z': 23.059078062109105, 'model_weight': 13.642, 'order_id': 918508, 'pid': 377610, 'print_id': 127044}, {'counts': 1, 'layout_z': 24.373135283888665, 'model_weight': 18.763, 'order_id': 919297, 'pid': 378814, 'print_id': 127045}, {'counts': 1, 'layout_z': 29.93312254092476, 'model_weight': 28.184, 'order_id': 918556, 'pid': 377685, 'print_id': 127046}, {'counts': 1, 'layout_z': 35.433726822945225, 'model_weight': 46.28, 'order_id': 919298, 'pid': 378815, 'print_id': 127047}, {'counts': 1, 'layout_z': 32.148966009725754, 'model_weight': 43.827, 'order_id': 919299, 'pid': 378816, 'print_id': 127048}, {'counts': 1, 'layout_z': 24.873977270254855, 'model_weight': 17.694, 'order_id': 919300, 'pid': 378817, 'print_id': 127049}, {'counts': 1, 'layout_z': 28.89906702733768, 'model_weight': 19.09, 'order_id': 918568, 'pid': 377739, 
'print_id': 127050}, {'counts': 1, 'layout_z': 29.072691539058013, 'model_weight': 24.701, 'order_id': 919301, 'pid': 378818, 'print_id': 127051}, {'counts': 1, 'layout_z': 22.799459457397457, 'model_weight': 17.986, 'order_id': 919302, 'pid': 378819, 'print_id': 127052}, {'counts': 1, 'layout_z': 28.30774288615256, 'model_weight': 23.62, 'order_id': 919303, 'pid': 378820, 'print_id': 127054}, {'counts': 1, 'layout_z': 27.685387987537027, 'model_weight': 18.804, 'order_id': 919304, 'pid': 378821, 'print_id': 127055}, {'counts': 1, 'layout_z': 21.501308067252822, 'model_weight': 7.512, 'order_id': 919305, 'pid': 378822, 'print_id': 127056}, {'counts': 1, 'layout_z': 31.91424955213119, 'model_weight': 37.428, 'order_id': 919307, 'pid': 378823, 'print_id': 127057}, {'counts': 1, 'layout_z': 32.039728096944266, 'model_weight': 35.473, 'order_id': 919308, 'pid': 378824, 'print_id': 127058}, {'counts': 1, 'layout_z': 33.27248765777313, 'model_weight': 37.264, 'order_id': 919309, 'pid': 378825, 'print_id': 127059}, {'counts': 1, 'layout_z': 31.01150813726163, 'model_weight': 27.421, 'order_id': 919310, 'pid': 378826, 'print_id': 127060}, {'counts': 1, 'layout_z': 25.02092682146464, 'model_weight': 16.694, 'order_id': 919311, 'pid': 378827, 'print_id': 127061}, {'counts': 1, 'layout_z': 27.649195956325578, 'model_weight': 20.131, 'order_id': 919314, 'pid': 378831, 'print_id': 127062}, {'counts': 1, 'layout_z': 29.323484806820524, 'model_weight': 23.913, 'order_id': 919313, 'pid': 378830, 'print_id': 127063}, {'counts': 1, 'layout_z': 29.93491564175814, 'model_weight': 27.02, 'order_id': 919312, 'pid': 378829, 'print_id': 127064}, {'counts': 1, 'layout_z': 29.220408560439278, 'model_weight': 25.891, 'order_id': 919316, 'pid': 378834, 'print_id': 127066}, {'counts': 1, 'layout_z': 27.086421547538176, 'model_weight': 21.186, 'order_id': 919317, 'pid': 378835, 'print_id': 127067}, {'counts': 1, 'layout_z': 29.687568424918922, 'model_weight': 22.681, 'order_id': 919318, 'pid': 378836, 'print_id': 127068}, {'counts': 1, 'layout_z': 30.30524230403575, 'model_weight': 26.048, 'order_id': 919319, 'pid': 378837, 'print_id': 127069}, {'counts': 1, 'layout_z': 24.082729373309707, 'model_weight': 18.03, 'order_id': 919322, 'pid': 378840, 'print_id': 127072}, {'counts': 1, 'layout_z': 27.8159610614131, 'model_weight': 21.29, 'order_id': 919321, 'pid': 378839, 'print_id': 127073}, {'counts': 1, 'layout_z': 28.93995122935326, 'model_weight': 21.318, 'order_id': 919326, 'pid': 378844, 'print_id': 127074}, {'counts': 1, 'layout_z': 27.58645357098898, 'model_weight': 23.726, 'order_id': 919324, 'pid': 378842, 'print_id': 127076}, {'counts': 1, 'layout_z': 26.337441702233544, 'model_weight': 23.781, 'order_id': 919329, 'pid': 378847, 'print_id': 127077}, {'counts': 1, 'layout_z': 25.032097288221692, 'model_weight': 17.614, 'order_id': 919328, 'pid': 378846, 'print_id': 127078}, {'counts': 1, 'layout_z': 27.0008910375328, 'model_weight': 20.06, 'order_id': 919327, 'pid': 378845, 'print_id': 127079}, {'counts': 1, 'layout_z': 28.573475817385898, 'model_weight': 22.823, 'order_id': 919334, 'pid': 378852, 'print_id': 127081}, {'counts': 1, 'layout_z': 23.589348437207477, 'model_weight': 19.486, 'order_id': 919335, 'pid': 378853, 'print_id': 127082}, {'counts': 1, 'layout_z': 28.29603645275283, 'model_weight': 20.464, 'order_id': 919330, 'pid': 378848, 'print_id': 127083}, {'counts': 1, 'layout_z': 36.094532611977854, 'model_weight': 19.122, 'order_id': 919333, 'pid': 378851, 'print_id': 127084}, {'counts': 1, 'layout_z': 
26.209464742348054, 'model_weight': 20.434, 'order_id': 919331, 'pid': 378849, 'print_id': 127085}]} + data= {"machine_print_counts":50,"pre_batch_id":991112,"print_machine_id":22,"print_orders":[{"counts":1,"layout_z":3.543965775898459,"model_weight":12.833,"order_id":857420,"pid":268473,"print_id":85240},{"counts":1,"layout_z":7.798861189952786,"model_weight":13.784,"order_id":875986,"pid":305425,"print_id":88136}]} + + process_clound_print(data) + + gc.collect() + + time.sleep(5) + + sys.exit(0) + +import os +def clear_directory_recursive(dir_path): + """ + 递归清空目录:保留原目录,递归删除其所有内容和子内容。 + """ + if not os.path.isdir(dir_path): + print(f"路径 {dir_path} 不是一个有效目录。") + return + + for item_name in os.listdir(dir_path): + item_path = os.path.join(dir_path, item_name) + + if os.path.isfile(item_path) or os.path.islink(item_path): + # 如果是文件或符号链接,直接删除 + try: + os.unlink(item_path) + print(f"已删除: {item_path}") + except Exception as e: + print(f"删除失败 {item_path}: {e}") + elif os.path.isdir(item_path): + # 如果是子目录,递归调用函数清空它,然后删除这个空目录 + try: + clear_directory_recursive(item_path) # 递归清空子目录 + os.rmdir(item_path) # 删除现已为空的子目录 + print(f"已删除子目录: {item_path}") + except Exception as e: + print(f"删除子目录失败 {item_path}: {e}") + + print(f"目录 {dir_path} 下的内容已清空。") + +def process_clound_print(data): + print_ids = [] + list_print_model_info = [] + + selected_machine = "大机型" + + try: + import json + # parsed = json.loads(data.decode('utf-8')) + parsed = data + + pre_batch_id = parsed["pre_batch_id"] + print(f"pre_batch_id={pre_batch_id}") + + machine_print_counts = parsed["machine_print_counts"] + + machine_type = "big_machine" + if not is_test: + machine_type = parsed["machine_type"] + + if machine_type == "small_machine" : + selected_machine = "小机型" + else : + selected_machine = "大机型" + + print_orders = parsed.get("print_orders", []) + + if not print_orders: + print("没有找到订单数据") + + results = [] + + for index, order in enumerate(print_orders, 1): + order_info = { + 'index': index, + 'counts': order.get('counts'), + 'pid': order.get('pid'), + 'print_id': order.get('print_id'), + 'order_id': order.get('order_id'), + 'layout_z': order.get('layout_z') + } + + results.append(order_info) + + """ + # 打印订单信息 + print(f"订单 #{index}:") + print(f" counts: {order_info['counts']}") + print(f" PID: {order_info['pid']}") + print(f" print_id: {order_info['print_id']}") + print(f" 布局Z: {order_info['layout_z']}") + """ + + print_ids.append(order_info['print_id']) + + # list_print_model_info.append(batch_model_info) + + # print("-" * 40) + + except (json.JSONDecodeError, UnicodeDecodeError): + # 如果不是 JSON,返回原始字符串作为值 + print("error!") + + print(f"print_ids={print_ids}") + + # print_ids2 = [115988, 118411] + + # api_addr_pre = "https://mp.api.suwa3d.com" + # test = True + # if test: + # api_addr_pre = "http://127.0.0.1:8199" + # url = f"{api_addr_pre}/api/printOrder/getInfoByPrintIds?print_ids={print_ids}" + + url = f"https://mp.api.suwa3d.com/api/printOrder/getInfoByPrintIds?print_ids={print_ids}" + res = requests.get(url) + + data = res.json()["data"] + + for index, order in enumerate(data, 1): + order_info = { + 'order_id': order.get('order_id'), + 'pid': order.get('pid'), + 'print_order_id': order.get('print_order_id'), + 'real_size': order.get('real_size'), + 'path': order.get('path'), + 'quantity': order.get('quantity') + } + + results.append(order_info) + + model_size = f"{order_info['real_size']}_x{order_info['quantity']}" + # 打印订单信息 + # print(f"订单 #{order_info['order_id']}:") + # print(f" PID: {order_info['pid']}") + # print(f" 
print_order_id: {order_info['print_order_id']}") + # print(f" model_size: {model_size}") + # print(f" quantity: {order_info['quantity']}") + # print(f" path: {order_info['path']}") + + batch_model_info = BatchModelInfo( + order_id=order_info["order_id"], + pid=order_info["pid"], + print_order_id=order_info["print_order_id"], + model_size=model_size, + path=order_info["path"], + count= order_info["quantity"] if order_info["quantity"] > 1 else 1 + ) + list_print_model_info.append(batch_model_info) + + print("-" * 40) + + print_factory_type_dir="/root/print_factory_type" + workdir = f"{print_factory_type_dir}/data/{pre_batch_id}" + oss_config = f"{print_factory_type_dir}/print_factory_type_setting_big/download_print/run.yaml" + + clear_directory_recursive(f"{print_factory_type_dir}/data/") + clear_directory_recursive(f"{print_factory_type_dir}/full/") + + start_time = time.time() + if not download_datas_by_pre_layout(list_print_model_info, workdir, oss_config): + print(f"下载失败,排版终止 批次={pre_batch_id}") + return + print(f"下载耗时:{time.time()-start_time}") + # 下载结束 + + # 排版开始 + src_dir = pre_batch_id + selected_mode="紧凑" # 标准 紧凑 + output_format="JSON" # 模型 JSON + cache_type_setting_dir=f"{print_factory_type_dir}/data/{src_dir}/arrange" + base_original_obj_dir = f"{print_factory_type_dir}/data/{src_dir}" + print_type_setting_obj(base_original_obj_dir=base_original_obj_dir,cache_type_setting_dir=cache_type_setting_dir, + batch_id=pre_batch_id,show_chart=False,selected_mode=selected_mode,output_format=output_format,selected_machine=selected_machine) + #排版结束 + +if __name__ == '__main__': + if is_test: + test_main() + else: + print(f"is_test={is_test}, run main") + main() + diff --git a/download_print.py b/download_print.py new file mode 100644 index 0000000..4b79924 --- /dev/null +++ b/download_print.py @@ -0,0 +1,1569 @@ +import yaml +import oss2 +import os +from tqdm import tqdm +# from utils.log_utils import log_execution +import os +from pathlib import Path + +import numpy as np +import collections +import struct +import math +import os +import argparse + +is_test = False + +CameraModel = collections.namedtuple( + "CameraModel", ["model_id", "model_name", "num_params"] +) +Camera = collections.namedtuple("Camera", ["id", "model", "width", "height", "params"]) +BaseImage = collections.namedtuple( + "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"] +) +Point3D = collections.namedtuple( + "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"] +) +CAMERA_MODELS = { + CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), + CameraModel(model_id=1, model_name="PINHOLE", num_params=4), + CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), + CameraModel(model_id=3, model_name="RADIAL", num_params=5), + CameraModel(model_id=4, model_name="OPENCV", num_params=8), + CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), + CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), + CameraModel(model_id=7, model_name="FOV", num_params=5), + CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), + CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), + CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12), +} +CAMERA_MODEL_IDS = dict( + [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS] +) +CAMERA_MODEL_NAMES = dict( + [(camera_model.model_name, camera_model) for camera_model in CAMERA_MODELS] +) + + +def qvec2rotmat(qvec): + return np.array( + [ + [ 
+ 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, + 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], + 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], + ], + [ + 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], + 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, + 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], + ], + [ + 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], + 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], + 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, + ], + ] + ) + + +def rotmat2qvec(R): + Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat + K = ( + np.array( + [ + [Rxx - Ryy - Rzz, 0, 0, 0], + [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0], + [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0], + [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz], + ] + ) + / 3.0 + ) + eigvals, eigvecs = np.linalg.eigh(K) + qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)] + if qvec[0] < 0: + qvec *= -1 + return qvec + + +class Image(BaseImage): + def qvec2rotmat(self): + return qvec2rotmat(self.qvec) + + +def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): + """Read and unpack the next bytes from a binary file. + :param fid: + :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. + :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. + :param endian_character: Any of {@, =, <, >, !} + :return: Tuple of read and unpacked values. + """ + data = fid.read(num_bytes) + return struct.unpack(endian_character + format_char_sequence, data) + + +def read_points3D_text(path): + """ + see: src/base/reconstruction.cc + void Reconstruction::ReadPoints3DText(const std::string& path) + void Reconstruction::WritePoints3DText(const std::string& path) + """ + xyzs = None + rgbs = None + errors = None + num_points = 0 + with open(path, "r") as fid: + while True: + line = fid.readline() + if not line: + break + line = line.strip() + if len(line) > 0 and line[0] != "#": + num_points += 1 + + xyzs = np.empty((num_points, 3)) + rgbs = np.empty((num_points, 3)) + errors = np.empty((num_points, 1)) + count = 0 + with open(path, "r") as fid: + while True: + line = fid.readline() + if not line: + break + line = line.strip() + if len(line) > 0 and line[0] != "#": + elems = line.split() + xyz = np.array(tuple(map(float, elems[1:4]))) + rgb = np.array(tuple(map(int, elems[4:7]))) + error = np.array(float(elems[7])) + xyzs[count] = xyz + rgbs[count] = rgb + errors[count] = error + count += 1 + + return xyzs, rgbs, errors + + +def read_points3D_binary(path_to_model_file): + """ + see: src/base/reconstruction.cc + void Reconstruction::ReadPoints3DBinary(const std::string& path) + void Reconstruction::WritePoints3DBinary(const std::string& path) + """ + + with open(path_to_model_file, "rb") as fid: + num_points = read_next_bytes(fid, 8, "Q")[0] + + xyzs = np.empty((num_points, 3)) + rgbs = np.empty((num_points, 3)) + errors = np.empty((num_points, 1)) + + for p_id in range(num_points): + binary_point_line_properties = read_next_bytes( + fid, num_bytes=43, format_char_sequence="QdddBBBd" + ) + xyz = np.array(binary_point_line_properties[1:4]) + rgb = np.array(binary_point_line_properties[4:7]) + error = np.array(binary_point_line_properties[7]) + track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ + 0 + ] + track_elems = read_next_bytes( + fid, + num_bytes=8 * track_length, + format_char_sequence="ii" * track_length, + ) + xyzs[p_id] = xyz + rgbs[p_id] = rgb + errors[p_id] = error + return xyzs, rgbs, errors + + +def read_intrinsics_text(path): + """ + Taken from 
https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py + """ + cameras = {} + with open(path, "r") as fid: + while True: + line = fid.readline() + if not line: + break + line = line.strip() + if len(line) > 0 and line[0] != "#": + elems = line.split() + camera_id = int(elems[0]) + model = elems[1] + assert ( + model == "PINHOLE" + ), "While the loader support other types, the rest of the code assumes PINHOLE" + width = int(elems[2]) + height = int(elems[3]) + params = np.array(tuple(map(float, elems[4:]))) + cameras[camera_id] = Camera( + id=camera_id, model=model, width=width, height=height, params=params + ) + return cameras + + +def read_extrinsics_binary(path_to_model_file): + """ + see: src/base/reconstruction.cc + void Reconstruction::ReadImagesBinary(const std::string& path) + void Reconstruction::WriteImagesBinary(const std::string& path) + """ + images = {} + with open(path_to_model_file, "rb") as fid: + num_reg_images = read_next_bytes(fid, 8, "Q")[0] + for _ in range(num_reg_images): + binary_image_properties = read_next_bytes( + fid, num_bytes=64, format_char_sequence="idddddddi" + ) + image_id = binary_image_properties[0] + qvec = np.array(binary_image_properties[1:5]) + tvec = np.array(binary_image_properties[5:8]) + camera_id = binary_image_properties[8] + image_name = "" + current_char = read_next_bytes(fid, 1, "c")[0] + while current_char != b"\x00": # look for the ASCII 0 entry + image_name += current_char.decode("utf-8") + current_char = read_next_bytes(fid, 1, "c")[0] + num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ + 0 + ] + x_y_id_s = read_next_bytes( + fid, + num_bytes=24 * num_points2D, + format_char_sequence="ddq" * num_points2D, + ) + xys = np.column_stack( + [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))] + ) + point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) + images[image_id] = Image( + id=image_id, + qvec=qvec, + tvec=tvec, + camera_id=camera_id, + name=image_name, + xys=xys, + point3D_ids=point3D_ids, + ) + return images + + +def read_intrinsics_binary(path_to_model_file): + """ + see: src/base/reconstruction.cc + void Reconstruction::WriteCamerasBinary(const std::string& path) + void Reconstruction::ReadCamerasBinary(const std::string& path) + """ + cameras = {} + with open(path_to_model_file, "rb") as fid: + num_cameras = read_next_bytes(fid, 8, "Q")[0] + for _ in range(num_cameras): + camera_properties = read_next_bytes( + fid, num_bytes=24, format_char_sequence="iiQQ" + ) + camera_id = camera_properties[0] + model_id = camera_properties[1] + model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name + width = camera_properties[2] + height = camera_properties[3] + num_params = CAMERA_MODEL_IDS[model_id].num_params + params = read_next_bytes( + fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params + ) + cameras[camera_id] = Camera( + id=camera_id, + model=model_name, + width=width, + height=height, + params=np.array(params), + ) + assert len(cameras) == num_cameras + return cameras + + +def focal2fov(focal, pixels): + return 2 * math.atan(pixels / (2 * focal)) + + +def read_extrinsics_text(path): + """ + Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py + """ + images = {} + with open(path, "r") as fid: + while True: + line = fid.readline() + if not line: + break + line = line.strip() + if len(line) > 0 and line[0] != "#": + elems = line.split() + image_id = int(elems[0]) + qvec = np.array(tuple(map(float, elems[1:5]))) + 
tvec = np.array(tuple(map(float, elems[5:8]))) + camera_id = int(elems[8]) + image_name = elems[9] + elems = fid.readline().split() + xys = np.column_stack( + [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))] + ) + point3D_ids = np.array(tuple(map(int, elems[2::3]))) + images[image_id] = Image( + id=image_id, + qvec=qvec, + tvec=tvec, + camera_id=camera_id, + name=image_name, + xys=xys, + point3D_ids=point3D_ids, + ) + return images + + +def read_colmap_bin_array(path): + """ + Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_dense.py + + :param path: path to the colmap binary file. + :return: nd array with the floating point values in the value + """ + with open(path, "rb") as fid: + width, height, channels = np.genfromtxt( + fid, delimiter="&", max_rows=1, usecols=(0, 1, 2), dtype=int + ) + fid.seek(0) + num_delimiter = 0 + byte = fid.read(1) + while True: + if byte == b"&": + num_delimiter += 1 + if num_delimiter >= 3: + break + byte = fid.read(1) + array = np.fromfile(fid, np.float32) + array = array.reshape((width, height, channels), order="F") + return np.transpose(array, (1, 0, 2)).squeeze() + + +def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): + """Read and unpack the next bytes from a binary file. + :param fid: + :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. + :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. + :param endian_character: Any of {@, =, <, >, !} + :return: Tuple of read and unpacked values. + """ + data = fid.read(num_bytes) + return struct.unpack(endian_character + format_char_sequence, data) + + +def write_next_bytes(fid, data, format_char_sequence, endian_character="<"): + """pack and write to a binary file. + :param fid: + :param data: data to send, if multiple elements are sent at the same time, + they should be encapsuled either in a list or a tuple + :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. 
+ should be the same length as the data list or tuple + :param endian_character: Any of {@, =, <, >, !} + """ + if isinstance(data, (list, tuple)): + bytes = struct.pack(endian_character + format_char_sequence, *data) + else: + bytes = struct.pack(endian_character + format_char_sequence, data) + fid.write(bytes) + + +def read_cameras_text(path): + """ + see: src/colmap/scene/reconstruction.cc + void Reconstruction::WriteCamerasText(const std::string& path) + void Reconstruction::ReadCamerasText(const std::string& path) + """ + cameras = {} + with open(path, "r") as fid: + while True: + line = fid.readline() + if not line: + break + line = line.strip() + if len(line) > 0 and line[0] != "#": + elems = line.split() + camera_id = int(elems[0]) + model = elems[1] + width = int(elems[2]) + height = int(elems[3]) + params = np.array(tuple(map(float, elems[4:]))) + cameras[camera_id] = Camera( + id=camera_id, + model=model, + width=width, + height=height, + params=params, + ) + return cameras + + +def read_cameras_binary(path_to_model_file): + """ + see: src/colmap/scene/reconstruction.cc + void Reconstruction::WriteCamerasBinary(const std::string& path) + void Reconstruction::ReadCamerasBinary(const std::string& path) + """ + cameras = {} + with open(path_to_model_file, "rb") as fid: + num_cameras = read_next_bytes(fid, 8, "Q")[0] + for _ in range(num_cameras): + camera_properties = read_next_bytes( + fid, num_bytes=24, format_char_sequence="iiQQ" + ) + camera_id = camera_properties[0] + model_id = camera_properties[1] + model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name + width = camera_properties[2] + height = camera_properties[3] + num_params = CAMERA_MODEL_IDS[model_id].num_params + params = read_next_bytes( + fid, + num_bytes=8 * num_params, + format_char_sequence="d" * num_params, + ) + cameras[camera_id] = Camera( + id=camera_id, + model=model_name, + width=width, + height=height, + params=np.array(params), + ) + assert len(cameras) == num_cameras + return cameras + + +def write_cameras_text(cameras, path): + """ + see: src/colmap/scene/reconstruction.cc + void Reconstruction::WriteCamerasText(const std::string& path) + void Reconstruction::ReadCamerasText(const std::string& path) + """ + HEADER = ( + "# Camera list with one line of data per camera:\n" + + "# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\n" + + "# Number of cameras: {}\n".format(len(cameras)) + ) + with open(path, "w") as fid: + fid.write(HEADER) + for _, cam in cameras.items(): + to_write = [cam.id, cam.model, cam.width, cam.height, *cam.params] + line = " ".join([str(elem) for elem in to_write]) + fid.write(line + "\n") + + +def write_cameras_binary(cameras, path_to_model_file): + """ + see: src/colmap/scene/reconstruction.cc + void Reconstruction::WriteCamerasBinary(const std::string& path) + void Reconstruction::ReadCamerasBinary(const std::string& path) + """ + with open(path_to_model_file, "wb") as fid: + write_next_bytes(fid, len(cameras), "Q") + for _, cam in cameras.items(): + model_id = CAMERA_MODEL_NAMES[cam.model].model_id + camera_properties = [cam.id, model_id, cam.width, cam.height] + write_next_bytes(fid, camera_properties, "iiQQ") + for p in cam.params: + write_next_bytes(fid, float(p), "d") + return cameras + + +def read_images_text(path): + """ + see: src/colmap/scene/reconstruction.cc + void Reconstruction::ReadImagesText(const std::string& path) + void Reconstruction::WriteImagesText(const std::string& path) + """ + images = {} + with open(path, "r") as fid: + while True: + line = 
fid.readline() + if not line: + break + line = line.strip() + if len(line) > 0 and line[0] != "#": + elems = line.split() + image_id = int(elems[0]) + qvec = np.array(tuple(map(float, elems[1:5]))) + tvec = np.array(tuple(map(float, elems[5:8]))) + camera_id = int(elems[8]) + image_name = elems[9] + elems = fid.readline().split() + xys = np.column_stack( + [ + tuple(map(float, elems[0::3])), + tuple(map(float, elems[1::3])), + ] + ) + point3D_ids = np.array(tuple(map(int, elems[2::3]))) + images[image_id] = Image( + id=image_id, + qvec=qvec, + tvec=tvec, + camera_id=camera_id, + name=image_name, + xys=xys, + point3D_ids=point3D_ids, + ) + return images + + +def read_images_binary(path_to_model_file): + """ + see: src/colmap/scene/reconstruction.cc + void Reconstruction::ReadImagesBinary(const std::string& path) + void Reconstruction::WriteImagesBinary(const std::string& path) + """ + images = {} + with open(path_to_model_file, "rb") as fid: + num_reg_images = read_next_bytes(fid, 8, "Q")[0] + for _ in range(num_reg_images): + binary_image_properties = read_next_bytes( + fid, num_bytes=64, format_char_sequence="idddddddi" + ) + image_id = binary_image_properties[0] + qvec = np.array(binary_image_properties[1:5]) + tvec = np.array(binary_image_properties[5:8]) + camera_id = binary_image_properties[8] + binary_image_name = b"" + current_char = read_next_bytes(fid, 1, "c")[0] + while current_char != b"\x00": # look for the ASCII 0 entry + binary_image_name += current_char + current_char = read_next_bytes(fid, 1, "c")[0] + image_name = binary_image_name.decode("utf-8") + num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ + 0 + ] + x_y_id_s = read_next_bytes( + fid, + num_bytes=24 * num_points2D, + format_char_sequence="ddq" * num_points2D, + ) + xys = np.column_stack( + [ + tuple(map(float, x_y_id_s[0::3])), + tuple(map(float, x_y_id_s[1::3])), + ] + ) + point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) + images[image_id] = Image( + id=image_id, + qvec=qvec, + tvec=tvec, + camera_id=camera_id, + name=image_name, + xys=xys, + point3D_ids=point3D_ids, + ) + return images + + +def write_images_text(images, path): + """ + see: src/colmap/scene/reconstruction.cc + void Reconstruction::ReadImagesText(const std::string& path) + void Reconstruction::WriteImagesText(const std::string& path) + """ + if len(images) == 0: + mean_observations = 0 + else: + mean_observations = sum( + (len(img.point3D_ids) for _, img in images.items()) + ) / len(images) + HEADER = ( + "# Image list with two lines of data per image:\n" + + "# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n" + + "# POINTS2D[] as (X, Y, POINT3D_ID)\n" + + "# Number of images: {}, mean observations per image: {}\n".format( + len(images), mean_observations + ) + ) + + with open(path, "w") as fid: + fid.write(HEADER) + for _, img in images.items(): + image_header = [ + img.id, + *img.qvec, + *img.tvec, + img.camera_id, + img.name, + ] + first_line = " ".join(map(str, image_header)) + fid.write(first_line + "\n") + + points_strings = [] + for xy, point3D_id in zip(img.xys, img.point3D_ids): + points_strings.append(" ".join(map(str, [*xy, point3D_id]))) + fid.write(" ".join(points_strings) + "\n") + + +def write_images_binary(images, path_to_model_file): + """ + see: src/colmap/scene/reconstruction.cc + void Reconstruction::ReadImagesBinary(const std::string& path) + void Reconstruction::WriteImagesBinary(const std::string& path) + """ + with open(path_to_model_file, "wb") as fid: + write_next_bytes(fid, 
len(images), "Q") + for _, img in images.items(): + write_next_bytes(fid, img.id, "i") + write_next_bytes(fid, img.qvec.tolist(), "dddd") + write_next_bytes(fid, img.tvec.tolist(), "ddd") + write_next_bytes(fid, img.camera_id, "i") + for char in img.name: + write_next_bytes(fid, char.encode("utf-8"), "c") + write_next_bytes(fid, b"\x00", "c") + write_next_bytes(fid, len(img.point3D_ids), "Q") + for xy, p3d_id in zip(img.xys, img.point3D_ids): + write_next_bytes(fid, [*xy, p3d_id], "ddq") + + +def read_points3D_text(path): + """ + see: src/colmap/scene/reconstruction.cc + void Reconstruction::ReadPoints3DText(const std::string& path) + void Reconstruction::WritePoints3DText(const std::string& path) + """ + points3D = {} + with open(path, "r") as fid: + while True: + line = fid.readline() + if not line: + break + line = line.strip() + if len(line) > 0 and line[0] != "#": + elems = line.split() + point3D_id = int(elems[0]) + xyz = np.array(tuple(map(float, elems[1:4]))) + rgb = np.array(tuple(map(int, elems[4:7]))) + error = float(elems[7]) + image_ids = np.array(tuple(map(int, elems[8::2]))) + point2D_idxs = np.array(tuple(map(int, elems[9::2]))) + points3D[point3D_id] = Point3D( + id=point3D_id, + xyz=xyz, + rgb=rgb, + error=error, + image_ids=image_ids, + point2D_idxs=point2D_idxs, + ) + return points3D + + +def read_points3D_binary(path_to_model_file): + """ + see: src/colmap/scene/reconstruction.cc + void Reconstruction::ReadPoints3DBinary(const std::string& path) + void Reconstruction::WritePoints3DBinary(const std::string& path) + """ + points3D = {} + with open(path_to_model_file, "rb") as fid: + num_points = read_next_bytes(fid, 8, "Q")[0] + for _ in range(num_points): + binary_point_line_properties = read_next_bytes( + fid, num_bytes=43, format_char_sequence="QdddBBBd" + ) + point3D_id = binary_point_line_properties[0] + xyz = np.array(binary_point_line_properties[1:4]) + rgb = np.array(binary_point_line_properties[4:7]) + error = np.array(binary_point_line_properties[7]) + track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[ + 0 + ] + track_elems = read_next_bytes( + fid, + num_bytes=8 * track_length, + format_char_sequence="ii" * track_length, + ) + image_ids = np.array(tuple(map(int, track_elems[0::2]))) + point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) + points3D[point3D_id] = Point3D( + id=point3D_id, + xyz=xyz, + rgb=rgb, + error=error, + image_ids=image_ids, + point2D_idxs=point2D_idxs, + ) + return points3D + + +def write_points3D_text(points3D, path): + """ + see: src/colmap/scene/reconstruction.cc + void Reconstruction::ReadPoints3DText(const std::string& path) + void Reconstruction::WritePoints3DText(const std::string& path) + """ + if len(points3D) == 0: + mean_track_length = 0 + else: + mean_track_length = sum( + (len(pt.image_ids) for _, pt in points3D.items()) + ) / len(points3D) + HEADER = ( + "# 3D point list with one line of data per point:\n" + + "# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\n" + + "# Number of points: {}, mean track length: {}\n".format( + len(points3D), mean_track_length + ) + ) + + with open(path, "w") as fid: + fid.write(HEADER) + for _, pt in points3D.items(): + point_header = [pt.id, *pt.xyz, *pt.rgb, pt.error] + fid.write(" ".join(map(str, point_header)) + " ") + track_strings = [] + for image_id, point2D in zip(pt.image_ids, pt.point2D_idxs): + track_strings.append(" ".join(map(str, [image_id, point2D]))) + fid.write(" ".join(track_strings) + "\n") + + +def 
write_points3D_binary(points3D, path_to_model_file): + """ + see: src/colmap/scene/reconstruction.cc + void Reconstruction::ReadPoints3DBinary(const std::string& path) + void Reconstruction::WritePoints3DBinary(const std::string& path) + """ + with open(path_to_model_file, "wb") as fid: + write_next_bytes(fid, len(points3D), "Q") + for _, pt in points3D.items(): + write_next_bytes(fid, pt.id, "Q") + write_next_bytes(fid, pt.xyz.tolist(), "ddd") + write_next_bytes(fid, pt.rgb.tolist(), "BBB") + write_next_bytes(fid, pt.error, "d") + track_length = pt.image_ids.shape[0] + write_next_bytes(fid, track_length, "Q") + for image_id, point2D_id in zip(pt.image_ids, pt.point2D_idxs): + write_next_bytes(fid, [image_id, point2D_id], "ii") + + +def detect_model_format(path, ext): + if ( + os.path.isfile(os.path.join(path, "cameras" + ext)) + and os.path.isfile(os.path.join(path, "images" + ext)) + and os.path.isfile(os.path.join(path, "points3D" + ext)) + ): + print("Detected model format: '" + ext + "'") + return True + + return False + + +def read_model(path, ext=""): + # try to detect the extension automatically + if ext == "": + if detect_model_format(path, ".bin"): + ext = ".bin" + elif detect_model_format(path, ".txt"): + ext = ".txt" + else: + print("Provide model format: '.bin' or '.txt'") + return + + if ext == ".txt": + cameras = read_cameras_text(os.path.join(path, "cameras" + ext)) + images = read_images_text(os.path.join(path, "images" + ext)) + points3D = read_points3D_text(os.path.join(path, "points3D") + ext) + else: + cameras = read_cameras_binary(os.path.join(path, "cameras" + ext)) + images = read_images_binary(os.path.join(path, "images" + ext)) + points3D = read_points3D_binary(os.path.join(path, "points3D") + ext) + return cameras, images, points3D + + +def write_model(cameras, images, points3D, path, ext=".bin"): + if ext == ".txt": + write_cameras_text(cameras, os.path.join(path, "cameras" + ext)) + write_images_text(images, os.path.join(path, "images" + ext)) + write_points3D_text(points3D, os.path.join(path, "points3D") + ext) + else: + write_cameras_binary(cameras, os.path.join(path, "cameras" + ext)) + write_images_binary(images, os.path.join(path, "images" + ext)) + write_points3D_binary(points3D, os.path.join(path, "points3D") + ext) + return cameras, images, points3D + + +def qvec2rotmat(qvec): + return np.array( + [ + [ + 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, + 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], + 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2], + ], + [ + 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], + 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2, + 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1], + ], + [ + 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], + 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], + 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2, + ], + ] + ) + + +def rotmat2qvec(R): + Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat + K = ( + np.array( + [ + [Rxx - Ryy - Rzz, 0, 0, 0], + [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0], + [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0], + [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz], + ] + ) + / 3.0 + ) + eigvals, eigvecs = np.linalg.eigh(K) + qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)] + if qvec[0] < 0: + qvec *= -1 + return qvec + + +def get_oss_client(cfg_path): + with open(os.path.expanduser(cfg_path), "r") as config: + cfg = yaml.safe_load(config) + + AccessKeyId_down = cfg["run"]["down"]["AccessKeyId"] + AccessKeySecret_down = cfg["run"]["down"]["AccessKeySecret"] + Endpoint_down = 
cfg["run"]["down"]["Endpoint"] + Bucket_down = cfg["run"]["down"]["Bucket"] + + oss_client = oss2.Bucket( + oss2.Auth(AccessKeyId_down, AccessKeySecret_down), Endpoint_down, Bucket_down + ) + + return oss_client + +class DataTransfer: + ''' + 数据传输类 + ''' + def __init__(self, local_path: str, oss_path: str, oss_client: oss2.Bucket): + ''' + local_path: 本地输出路径 + oss_path: oss路径 + oss_client: oss客户端 + ''' + self.local_path = local_path + self.oss_path = oss_path.lstrip('/') + self.oss_client = oss_client + # self.description = description + + # @log_execution(self.description) + def download_data(self): + """ + 从 OSS 下载数据到本地,保持原有目录结构 + """ + + # 列出所有对象 + objects = [] + prefix = self.oss_path.lstrip('/') # 移除开头的 '/' 以匹配 OSS 格式 + + for obj in oss2.ObjectIterator(self.oss_client, prefix=prefix): + if obj.key != prefix: # 跳过目录本身 + objects.append(obj.key) + + # 下载所有文件,添加进度条 + for obj_key in tqdm(objects, desc="下载进度"): + if obj_key.endswith('/'): + continue + + if "printId" in obj_key: + continue + + # 计算相对路径 + rel_path = obj_key[len(prefix):].lstrip('/') + # 构建本地完整路径 + local_path = os.path.join(self.local_path, rel_path) + + # 创建必要的目录 + os.makedirs(os.path.dirname(local_path), exist_ok=True) + + # 下载文件 + self.oss_client.get_object_to_file(obj_key, local_path) + + print("download_data local_path=" + local_path) + + order_id: str + pid: str + model_height: str + + def download_data_rename_json(self, json_model_info): + """ + 从 OSS 下载数据到本地,保持原有目录结构 + """ + + # 列出所有对象 + objects = [] + prefix = self.oss_path.lstrip('/') # 移除开头的 '/' 以匹配 OSS 格式 + + for obj in oss2.ObjectIterator(self.oss_client, prefix=prefix): + if obj.key != prefix: # 跳过目录本身 + objects.append(obj.key) + + # 下载所有文件,添加进度条 + for obj_key in tqdm(objects, desc="下载进度"): + if obj_key.endswith('/'): + continue + + if "printId" in obj_key: + continue + + # 计算相对路径 + rel_path = obj_key[len(prefix):].lstrip('/') + + file_dir, file_name = os.path.split(rel_path) + file_base, file_ext = os.path.splitext(file_name) + + # 根据文件后缀名进行重命名 + if file_ext.lower() in ['.mtl', '.jpg', '.jpeg', '.png']: + # 对于.mtl和图片文件,在原名前加order_id + new_file_name = f"{json_model_info.order_id}_{file_name}" + # new_file_name = file_name + elif file_ext.lower() == '.obj': + # 对于.obj文件,完全重命名 + new_file_name = f"{json_model_info.obj_name}" + else: + # 其他文件类型保持原名 + new_file_name = file_name + print("new_file_name=", new_file_name) + + # 构建新的相对路径 + if file_dir: # 如果有子目录 + new_rel_path = os.path.join(file_dir, new_file_name) + else: + new_rel_path = new_file_name + + # 构建本地完整路径 + local_path = os.path.join(self.local_path, new_rel_path) + + # 创建必要的目录 + os.makedirs(os.path.dirname(local_path), exist_ok=True) + + # 下载文件 + self.oss_client.get_object_to_file(obj_key, local_path) + + if file_ext == '.obj': # 10MB以上 + try: + # 使用临时文件避免内存问题 [8](@ref) + temp_path = local_path + '.tmp' + with open(local_path, 'r', encoding='utf-8') as f_in, \ + open(temp_path, 'w', encoding='utf-8') as f_out: + + mtllib_modified = False + for line in f_in: + if not mtllib_modified and line.strip().startswith('mtllib '): + parts = line.split(' ', 1) + if len(parts) > 1: + old_mtl_name = parts[1].strip() + new_mtl_name = f"{json_model_info.order_id}_{old_mtl_name}" + f_out.write(f"mtllib {new_mtl_name}\n") + mtllib_modified = True + continue + f_out.write(line) + + os.replace(temp_path, local_path) # 原子性替换 + + except IOError as e: + print(f"处理大文件 {local_path} 时出错: {e}") + if os.path.exists(temp_path): + os.remove(temp_path) + + # 优化后的.obj文件处理逻辑 + if file_ext == '.mtl': + try: + # 使用更高效的文件读取方式 [6,8](@ref) + with 
open(local_path, 'r', encoding='utf-8') as f: + content = f.read() + + # 使用字符串方法直接查找和替换,避免不必要的循环 [9](@ref) + lines = content.split('\n') + mtllib_modified = False + + for i, line in enumerate(lines): + stripped_line = line.strip() + if not mtllib_modified and stripped_line.startswith('map_Kd '): + # 更高效的分割方式 [9](@ref) + parts = line.split(' ', 1) + if len(parts) > 1: + old_name = parts[1].strip() + new_name = f"{json_model_info.order_id}_{old_name}" + lines[i] = f"map_Kd {new_name}" + mtllib_modified = True + print(f"已更新材质库引用: {old_name} -> {new_name}") + break # 找到第一个后立即退出 + + # 批量写入,减少I/O操作 [6](@ref) + with open(local_path, 'w', encoding='utf-8') as f: + f.write('\n'.join(lines)) + + except IOError as e: + print(f"处理文件 {local_path} 时出错: {e}") + except UnicodeDecodeError as e: + print(f"文件编码错误 {local_path}: {e}") + + print(f"下载文件: {obj_key} -> {local_path}") + + def download_data_rename_batch(self, batch_model_info): + """ + 从 OSS 下载数据到本地,保持原有目录结构 + """ + + # 列出所有对象 + objects = [] + prefix = self.oss_path.lstrip('/') # 移除开头的 '/' 以匹配 OSS 格式 + + prefix_exists = False + + for obj in oss2.ObjectIterator(self.oss_client, prefix=prefix): + prefix_exists = True + if obj.key != prefix: # 跳过目录本身 + objects.append(obj.key) + + if not prefix_exists: + print(f"前缀 '{prefix}' 下没有找到任何文件或目录。") + return False + else: + print(f"前缀 '{prefix}' 存在,共找到 {len(objects)} 个对象。") + + # 下载所有文件,添加进度条 + for obj_key in tqdm(objects, desc="下载进度"): + if obj_key.endswith('/'): + print("下载 endswith('/'") + continue + + if "printId" in obj_key: + print(f"下载 in obj_key") + continue + + # 计算相对路径 + rel_path = obj_key[len(prefix):].lstrip('/') + + file_dir, file_name = os.path.split(rel_path) + file_base, file_ext = os.path.splitext(file_name) + + # 根据文件后缀名进行重命名 + if file_ext.lower() in ['.mtl', '.jpg', '.jpeg', '.png']: + # 对于.mtl和图片文件,在原名前加order_id + new_file_name = f"{batch_model_info.order_id}_{file_name}" + # new_file_name = file_name + elif file_ext.lower() == '.obj': + # 对于.obj文件,完全重命名 + new_file_name = f"{batch_model_info.order_id}_{batch_model_info.pid}_P{batch_model_info.print_order_id}_{batch_model_info.model_size}{file_ext}" + else: + # 其他文件类型保持原名 + new_file_name = file_name + + # 构建新的相对路径 + if file_dir: # 如果有子目录 + new_rel_path = os.path.join(file_dir, new_file_name) + else: + new_rel_path = new_file_name + + # 构建本地完整路径 + local_path = os.path.join(self.local_path, new_rel_path) + + # 创建必要的目录 + os.makedirs(os.path.dirname(local_path), exist_ok=True) + + # 下载文件 + self.oss_client.get_object_to_file(obj_key, local_path) + + if file_ext == '.obj': # 10MB以上 + try: + # 使用临时文件避免内存问题 [8](@ref) + temp_path = local_path + '.tmp' + with open(local_path, 'r', encoding='utf-8') as f_in, \ + open(temp_path, 'w', encoding='utf-8') as f_out: + + mtllib_modified = False + for line in f_in: + if not mtllib_modified and line.strip().startswith('mtllib '): + parts = line.split(' ', 1) + if len(parts) > 1: + old_mtl_name = parts[1].strip() + new_mtl_name = f"{batch_model_info.order_id}_{old_mtl_name}" + f_out.write(f"mtllib {new_mtl_name}\n") + mtllib_modified = True + continue + f_out.write(line) + + os.replace(temp_path, local_path) # 原子性替换 + + except IOError as e: + print(f"处理大文件 {local_path} 时出错: {e}") + if os.path.exists(temp_path): + os.remove(temp_path) + + # 优化后的.obj文件处理逻辑 + if file_ext == '.mtl': + try: + # 使用更高效的文件读取方式 [6,8](@ref) + with open(local_path, 'r', encoding='utf-8') as f: + content = f.read() + + # 使用字符串方法直接查找和替换,避免不必要的循环 [9](@ref) + lines = content.split('\n') + mtllib_modified = False + + for i, line in 
enumerate(lines): + stripped_line = line.strip() + if not mtllib_modified and stripped_line.startswith('map_Kd '): + # 更高效的分割方式 [9](@ref) + parts = line.split(' ', 1) + if len(parts) > 1: + old_name = parts[1].strip() + new_name = f"{batch_model_info.order_id}_{old_name}" + lines[i] = f"map_Kd {new_name}" + mtllib_modified = True + print(f"已更新材质库引用: {old_name} -> {new_name}") + break # 找到第一个后立即退出 + + # 批量写入,减少I/O操作 [6](@ref) + with open(local_path, 'w', encoding='utf-8') as f: + f.write('\n'.join(lines)) + + except IOError as e: + print(f"处理文件 {local_path} 时出错: {e}") + except UnicodeDecodeError as e: + print(f"文件编码错误 {local_path}: {e}") + + print(f"下载文件: {obj_key} -> {local_path}") + + return True + + def download_single_file(self): + """ + 下载单个文件从OSS到本地 + """ + # 确保本地目录存在 + os.makedirs(os.path.dirname(self.local_path), exist_ok=True) + + # 直接下载文件 + try: + self.oss_client.get_object_to_file(self.oss_path, self.local_path) + print(f"文件已下载到: {self.local_path}") + except oss2.exceptions.NoSuchKey: + print(f"OSS文件不存在: {self.oss_path}") + + def upload_data(self): + ''' + 上传数据到OSS + ''' + # 检测本地路径是否存在 + if not os.path.exists(self.local_path): + raise FileNotFoundError(f"本地路径不存在: {self.local_path}") + + # 判断本地路径是文件还是目录 + if os.path.isfile(self.local_path): + local_suffix = Path(self.local_path).suffix + oss_suffix = Path(self.oss_path).suffix + + if oss_suffix and oss_suffix != local_suffix: + # 后缀名不一致,上传到指定文件夹下的同名文件 + oss_dir = os.path.dirname(self.oss_path) + oss_target_path = os.path.join(oss_dir, os.path.basename(self.local_path)) + else: + # 后缀名一致,上传到指定OSS路径 + oss_target_path = self.oss_path + + # 上传文件 + self.oss_client.put_object_from_file(oss_target_path, self.local_path) + + print(f"文件已上传到: {oss_target_path}") + + elif os.path.isdir(self.local_path): + oss_suffix = Path(self.oss_path).suffix + if oss_suffix: + raise ValueError("不能将目录上传到具有后缀名的OSS路径。") + + # 遍历本地目录并上传 + for root, dirs, files in os.walk(self.local_path): + for file in files: + local_file_path = os.path.join(root, file) + relative_path = os.path.relpath(local_file_path, self.local_path) + oss_file_path = os.path.join(self.oss_path, relative_path).replace("\\", "/") + + # 创建必要的目录 + oss_dir = os.path.dirname(oss_file_path) + + # 上传文件 + self.oss_client.put_object_from_file(oss_file_path, local_file_path) + print(f"文件已上传到: {oss_file_path}") + else: + raise ValueError(f"无效的本地路径类型: {self.local_path}") + +import requests +import json +import shutil + +def get_api(url): + try: + response = requests.get(url) + response.raise_for_status() # 检查请求是否成功 + response = json.loads(response.text) + if response.get("code") != 1000: + raise Exception(f"Error fetching URL {url}: {response.get('message')}") + else: + return response + except requests.exceptions.RequestException as e: + raise Exception(f"Error fetching URL {url}: {e}") + +from dataclasses import dataclass +@dataclass +class JSONModelInfo: + obj_name: str + order_id: str + pid: str + model_height: str + +def read_pids_from_json(pid_file): + """从文件读取所有PID""" + # with open(pid_file, 'r') as f: + # # 过滤掉空行并去除每行首尾的空白字符 + # return [line.strip() for line in f if line.strip()] + + json_path = pid_file + + """ + 加载JSON文件,读取所有模型信息,应用变换后返回模型列表 + """ + # 检查JSON文件是否存在 + if not os.path.exists(json_path): + print(f"错误: JSON文件不存在 - {json_path}") + return [] + + # 读取JSON文件 + try: + with open(json_path, 'r') as f: + data = json.load(f) + except Exception as e: + print(f"读取JSON文件失败: {e}") + return [] + + list_model_info = [] + # 处理每个模型 + for model in data.get('models', []): + obj_name = 
model.get('file_name', '') + + parts = obj_name.split('_') + + order_id = parts[0] + pid = parts[1] + model_height = parts[3] + + model_info = JSONModelInfo( + obj_name=obj_name, + order_id=order_id, + pid=pid, + model_height=model_height + ) + list_model_info.append(model_info) + + return list_model_info, data + +def download_data_by_json(model_info, workdir, oss_client ): + ''' + 下载卡通化数据 + ''' + try: + pid = model_info.pid + model_height = model_info.model_height + # target_dir = f"{workdir}/{pid}_image" + target_dir = f"{workdir}" + + # {"code":1000,"data":"base_cartoon/badge/101/3/init_obj","message":"success1"} + # https://mp.api.suwa3d.com/api/order/getOssSuffixByOrderId?order_id=879312 + + url = f"https://mp.api.suwa3d.com/api/order/getOssSuffixByOrderId?order_id={model_info.order_id}" + res = requests.get(url) + + data = res.json()["data"] + # print("datas=",data) + data = data.replace("/init_obj", "") + + print("target_dir=", target_dir) + # download_textures = DataTransfer(target_dir, f"objs/download/print/{pid}/base/model/{model_height}/", oss_client) + # download_textures = DataTransfer(target_dir, f"objs/download/print/{pid}/base_cartoon/badge/101/3/{model_height}/", oss_client) + download_textures = DataTransfer(target_dir, f"objs/download/print/{pid}/{data}/{model_height}/", oss_client) + + download_textures.download_data_rename_json(model_info) + # 下载后检查目标文件夹是否为空 + if os.path.exists(target_dir) and not os.listdir(target_dir): + shutil.rmtree(target_dir) + print(f"下载后检查发现目标文件夹为空,已删除: {target_dir}") + except Exception as e: + print(f"卡通图片下载失败: {pid}, 错误: {str(e)}") + pass + +@dataclass +class BatchModelInfo: + order_id: str + pid: str + print_order_id: str + model_size: str + path: str + count: str + +def read_paths_from_batch(batch_id): + + url = f"https://mp.api.suwa3d.com/api/printOrder/getInfoByPrintBatchId?batch_id={batch_id}" + res = requests.get(url) + + datas = res.json()["data"] + print("datas=",datas) + + list_print_model_info = [] + for data in datas: + batch_model_info = BatchModelInfo( + order_id=data["order_id"], + pid=data["pid"], + print_order_id=data["print_order_id"], + model_size=data["model_size"], + path=data["path"], + count=data["quantity"] + + ) + list_print_model_info.append(batch_model_info) + + return list_print_model_info, datas + +def download_data_by_batch(batch_model_info, workdir, oss_client ): + try: + target_dir = f"{workdir}" + + print("target_dir=", target_dir) + path = batch_model_info.path + download_textures = DataTransfer(target_dir, f"{path}/", oss_client) + + if not download_textures.download_data_rename_batch(batch_model_info): + return False + # 下载后检查目标文件夹是否为空 + if os.path.exists(target_dir) and not os.listdir(target_dir): + shutil.rmtree(target_dir) + print(f"下载后检查发现目标文件夹为空,已删除: {target_dir}") + except Exception as e: + print(f"卡通图片下载失败: {path}, 错误: {str(e)}") + pass + + return True + +def download_datas_by_batch(batch_id, workdir, oss_config): + oss_client = get_oss_client(oss_config) + + # 读取所有path + list_print_model_info, datas = read_paths_from_batch(batch_id) + print(f"从文件读取了 {len(list_print_model_info)} 个path") + # 批量下载 + for batch_model_info in list_print_model_info: + print(f"开始下载print_model_info: {batch_model_info}") + download_data_by_batch(batch_model_info, workdir, oss_client) + + return datas + +def download_datas_by_pre_layout(list_print_model_info, workdir, oss_config): + oss_client = get_oss_client(oss_config) + + print(f"从文件读取了 {len(list_print_model_info)} 个path") + # 批量下载 + for batch_model_info in 
list_print_model_info: + print(f"开始下载print_model_info: {batch_model_info}") + if not download_data_by_batch(batch_model_info, workdir, oss_client): + return False + return True + +def download_transform_save_by_batch(batch_id, workdir, oss_config): + datas = download_datas_by_batch(batch_id, workdir, oss_config) + print("datas=", datas) + layout_data = datas["layout_data"] + + original_obj_pid_dir = workdir + cache_type_setting_dir = os.path.join(workdir, "arrange") + Path(cache_type_setting_dir).mkdir(exist_ok=True) + + print(f"original_obj_pid_dir={original_obj_pid_dir}, cache_type_setting_dir={cache_type_setting_dir}") + + transform_save(layout_data, original_obj_pid_dir, cache_type_setting_dir) + + +def download_datas_by_json(pid_file, workdir, oss_config): + oss_client = get_oss_client(oss_config) + + #json_path = os.path.join(workdir, "3DPrintLayout.json") + json_path = os.path.join(workdir, f"{pid_file}.json") + + # 读取所有PID + list_model_info, data = read_pids_from_json(json_path) + print(f"从文件读取了 {len(list_model_info)} 个PID") + # 批量下载 + for model_info in list_model_info: + print(f"开始下载PID: {model_info}") + download_data_by_json(model_info, args.workdir, oss_client) + + return data + +def download_transform_save_by_json(pid_file, workdir, oss_config): + layout_data = download_datas_by_json(pid_file, workdir, oss_config) + + original_obj_pid_dir = workdir + cache_type_setting_dir = os.path.join(workdir, "arrange") + Path(cache_type_setting_dir).mkdir(exist_ok=True) + + print(f"original_obj_pid_dir={original_obj_pid_dir}, cache_type_setting_dir={cache_type_setting_dir}") + + transform_save(layout_data, original_obj_pid_dir, cache_type_setting_dir) + +def upload_result(base_original_obj_dir, oss_config, batch_id): + + oss_client = get_oss_client(oss_config) + + try: + target_dir = f"{base_original_obj_dir}" + + oss_batch_dir = "batchPrint" + if is_test: + oss_batch_dir = "batchPrint/debug_hsc" + + print(f"target_dir={target_dir}, batch_id={batch_id}") + data_transfer = DataTransfer(f"{target_dir}/{batch_id}.json", f"{oss_batch_dir}/{batch_id}/{batch_id}.json", oss_client) + data_transfer.upload_data() + data_transfer = DataTransfer(f"{target_dir}/{batch_id}.jpg", f"{oss_batch_dir}/{batch_id}/{batch_id}.jpg", oss_client) + data_transfer.upload_data() + + except Exception as e: + print(f"失败: {batch_id}, 错误: {str(e)}") + pass + +import open3d as o3d +from test_load_json import custom_mesh_transform + +def transform_save(layout_data, original_obj_pid_dir, cache_type_setting_dir): + meshes = [] + # 小打印机380*345,需要偏移-380,-345 + need_offset = True + for model in layout_data["models"]: + transform = model.get('transform', {}) + + homo_matrix = transform["homo_matrix"] # 获取存储的列表 + reconstructed_matrix = np.array(homo_matrix, dtype=np.float64) + + obj_name = model.get('file_name', '') + obj_path = os.path.join(original_obj_pid_dir, obj_name) + # 加载网格 + try: + mesh = o3d.io.read_triangle_mesh(obj_path, enable_post_processing=True) + if not mesh.has_vertices(): + print(f"警告: 网格无有效顶点 - {obj_path}") + continue + except Exception as e: + print(f"加载模型失败: {obj_path} - {e}") + continue + + original_vertices = np.asarray(mesh.vertices) + + transformed_vertices = custom_mesh_transform(original_vertices, reconstructed_matrix) + # 如果 need_offset 为 True,应用额外的偏移 + if need_offset: + # 应用偏移 (-380, -345, 0) + offset = np.array([-380, -345, 0]) + transformed_vertices += offset + print(f"已对模型 {obj_name} 应用偏移: {offset}") + + mesh.vertices = o3d.utility.Vector3dVector(transformed_vertices) + + meshes.append(mesh) + + 
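+        # (editor's note) The transformed copy is written into the arrange cache right
+        # below; the `meshes` list is only accumulated here and is not used again in
+        # this function.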
# obj_path_arrange = os.path.join(original_obj_pid_dir, "arrange") + obj_path_arrange = cache_type_setting_dir + if not os.path.exists(obj_path_arrange): + os.mkdir(obj_path_arrange) + obj_path_arrange_obj = os.path.join(obj_path_arrange, obj_name) + print("obj_path_arrange_obj", obj_path_arrange_obj) + mesh.compute_vertex_normals() + o3d.io.write_triangle_mesh(obj_path_arrange_obj, mesh,write_triangle_uvs=True) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + is_by_batch = True + is_transform_save = False + if is_by_batch: + # 通过批次下载 + """ + parser.add_argument("--batch_id", type=str, required=True, help="batch_id") + parser.add_argument("--workdir", type=str, required=True) + parser.add_argument("--oss_config", type=str, required=True) + args = parser.parse_args() + """ + # batch_id = args.batch_id + batch_id = 2499 + + print_factory_type_dir="/root/print_factory_type" + # workdir = args.workdir + workdir = f"{print_factory_type_dir}/{batch_id}" + + # oss_config = args.oss_config + oss_config = f"{print_factory_type_dir}/print_factory_type_setting_big/download_print/run.yaml" + + if is_transform_save: + download_transform_save_by_batch(batch_id, workdir, oss_config) + else: + download_datas_by_batch(batch_id, workdir, oss_config) + + """ + oss_client = get_oss_client(args.oss_config) + # 读取所有path + list_print_model_info = read_paths_from_batch(args.batch_id) + print(f"从文件读取了 {len(list_print_model_info)} 个path") + # 批量下载 + for batch_model_info in list_print_model_info: + print(f"开始下载print_model_info: {batch_model_info}") + download_data_by_batch(batch_model_info, args.workdir, oss_client) + """ + else: + # 通过Json下载 + parser.add_argument("--batch_id", type=str, required=True, help="包含PID列表的json文件路径") + parser.add_argument("--workdir", type=str, required=True) + parser.add_argument("--oss_config", type=str, required=True) + args = parser.parse_args() + + if is_transform_save: + download_transform_save_by_json(args.batch_id, args.workdir, args.oss_config) + else: + download_datas_by_json(args.batch_id, args.workdir, args.oss_config) + + """ + oss_client = get_oss_client(args.oss_config) + pid_file = os.path.join(args.workdir, "3DPrintLayout.json") + + print("pid_file=", pid_file) + # 读取所有PID + list_model_info = read_pids_from_json(pid_file) + print(f"从文件读取了 {len(list_model_info)} 个PID") + # 批量下载 + for model_info in list_model_info: + print(f"开始下载PID: {model_info}") + download_data_by_json(model_info, args.workdir, oss_client) + """ + diff --git a/download_print/run.yaml b/download_print/run.yaml new file mode 100755 index 0000000..2aef35a --- /dev/null +++ b/download_print/run.yaml @@ -0,0 +1,6 @@ +run: + down: + AccessKeyId: 'LTAI5tJDLxK6wBdHE9Nu443G' + AccessKeySecret: 'sBN7IK4ozSE9nNtmD3dmDSuiS24SZq' + Endpoint: 'oss-cn-shanghai.aliyuncs.com' + Bucket: 'suwa3d-securedata' diff --git a/download_print_out.py b/download_print_out.py new file mode 100644 index 0000000..26626e6 --- /dev/null +++ b/download_print_out.py @@ -0,0 +1,354 @@ +import yaml +import oss2 +import os +from tqdm import tqdm +import os +from pathlib import Path +import numpy as np +import os +import argparse + +import open3d as o3d + +def custom_mesh_transform(vertices, transform_matrix): + """ + 手动实现网格变换:对每个顶点应用齐次变换矩阵 + 参数: + vertices: 网格顶点数组 (N, 3) + transform_matrix: 4x4 齐次变换矩阵 + 返回: + 变换后的顶点数组 (N, 3) + """ + # 1. 顶点转齐次坐标 (N, 3) → (N, 4) + homogeneous_vertices = np.hstack((vertices, np.ones((vertices.shape[0], 1)))) + + # 2. 
应用变换矩阵:矩阵乘法 (4x4) * (4xN) → (4xN) + transformed_homogeneous = transform_matrix @ homogeneous_vertices.T + + # 3. 转回非齐次坐标 (3xN) → (N, 3) + transformed_vertices = transformed_homogeneous[:3, :].T + return transformed_vertices + +class DataTransfer: + ''' + 数据传输类 + ''' + def __init__(self, local_path: str, oss_path: str, oss_client: oss2.Bucket): + ''' + local_path: 本地输出路径 + oss_path: oss路径 + oss_client: oss客户端 + ''' + self.local_path = local_path + self.oss_path = oss_path.lstrip('/') + self.oss_client = oss_client + + order_id: str + pid: str + model_height: str + + def download_data_rename_json(self, json_model_info): + """ + 从 OSS 下载数据到本地,保持原有目录结构 + """ + + # 列出所有对象 + objects = [] + prefix = self.oss_path.lstrip('/') # 移除开头的 '/' 以匹配 OSS 格式 + + for obj in oss2.ObjectIterator(self.oss_client, prefix=prefix): + if obj.key != prefix: # 跳过目录本身 + objects.append(obj.key) + + # 下载所有文件,添加进度条 + for obj_key in tqdm(objects, desc="下载进度"): + if obj_key.endswith('/'): + continue + + if "printId" in obj_key: + continue + + # 计算相对路径 + rel_path = obj_key[len(prefix):].lstrip('/') + + file_dir, file_name = os.path.split(rel_path) + file_base, file_ext = os.path.splitext(file_name) + + # 根据文件后缀名进行重命名 + if file_ext.lower() in ['.mtl', '.jpg', '.jpeg', '.png']: + # 对于.mtl和图片文件,在原名前加order_id + new_file_name = f"{json_model_info.order_id}_{file_name}" + # new_file_name = file_name + elif file_ext.lower() == '.obj': + # 对于.obj文件,完全重命名 + new_file_name = f"{json_model_info.obj_name}" + else: + # 其他文件类型保持原名 + new_file_name = file_name + print("new_file_name=", new_file_name) + + # 构建新的相对路径 + if file_dir: # 如果有子目录 + new_rel_path = os.path.join(file_dir, new_file_name) + else: + new_rel_path = new_file_name + + # 构建本地完整路径 + local_path = os.path.join(self.local_path, new_rel_path) + + # 创建必要的目录 + os.makedirs(os.path.dirname(local_path), exist_ok=True) + + # 下载文件 + self.oss_client.get_object_to_file(obj_key, local_path) + + if file_ext == '.obj': # 10MB以上 + try: + # 使用临时文件避免内存问题 [8](@ref) + temp_path = local_path + '.tmp' + with open(local_path, 'r', encoding='utf-8') as f_in, \ + open(temp_path, 'w', encoding='utf-8') as f_out: + + mtllib_modified = False + for line in f_in: + if not mtllib_modified and line.strip().startswith('mtllib '): + parts = line.split(' ', 1) + if len(parts) > 1: + old_mtl_name = parts[1].strip() + new_mtl_name = f"{json_model_info.order_id}_{old_mtl_name}" + f_out.write(f"mtllib {new_mtl_name}\n") + mtllib_modified = True + continue + f_out.write(line) + + os.replace(temp_path, local_path) # 原子性替换 + + except IOError as e: + print(f"处理大文件 {local_path} 时出错: {e}") + if os.path.exists(temp_path): + os.remove(temp_path) + + # 优化后的.obj文件处理逻辑 + if file_ext == '.mtl': + try: + # 使用更高效的文件读取方式 [6,8](@ref) + with open(local_path, 'r', encoding='utf-8') as f: + content = f.read() + + # 使用字符串方法直接查找和替换,避免不必要的循环 [9](@ref) + lines = content.split('\n') + mtllib_modified = False + + for i, line in enumerate(lines): + stripped_line = line.strip() + if not mtllib_modified and stripped_line.startswith('map_Kd '): + # 更高效的分割方式 [9](@ref) + parts = line.split(' ', 1) + if len(parts) > 1: + old_name = parts[1].strip() + new_name = f"{json_model_info.order_id}_{old_name}" + lines[i] = f"map_Kd {new_name}" + mtllib_modified = True + print(f"已更新材质库引用: {old_name} -> {new_name}") + break # 找到第一个后立即退出 + + # 批量写入,减少I/O操作 [6](@ref) + with open(local_path, 'w', encoding='utf-8') as f: + f.write('\n'.join(lines)) + + except IOError as e: + print(f"处理文件 {local_path} 时出错: {e}") + except UnicodeDecodeError as e: + print(f"文件编码错误 
{local_path}: {e}") + + print(f"下载文件: {obj_key} -> {local_path}") + +import requests +import json +import shutil + +def get_api(url): + try: + response = requests.get(url) + response.raise_for_status() # 检查请求是否成功 + response = json.loads(response.text) + if response.get("code") != 1000: + raise Exception(f"Error fetching URL {url}: {response.get('message')}") + else: + return response + except requests.exceptions.RequestException as e: + raise Exception(f"Error fetching URL {url}: {e}") + +from dataclasses import dataclass +@dataclass +class JSONModelInfo: + obj_name: str + order_id: str + pid: str + model_height: str + +def read_pids_from_json(pid_file): + """从文件读取所有PID""" + + json_path = pid_file + + """ + 加载JSON文件,读取所有模型信息,应用变换后返回模型列表 + """ + # 检查JSON文件是否存在 + if not os.path.exists(json_path): + print(f"错误: JSON文件不存在 - {json_path}") + return [] + + # 读取JSON文件 + try: + with open(json_path, 'r') as f: + data = json.load(f) + except Exception as e: + print(f"读取JSON文件失败: {e}") + return [] + + list_model_info = [] + # 处理每个模型 + for model in data.get('models', []): + obj_name = model.get('file_name', '') + + parts = obj_name.split('_') + + order_id = parts[0] + pid = parts[1] + model_height = parts[3] + + model_info = JSONModelInfo( + obj_name=obj_name, + order_id=order_id, + pid=pid, + model_height=model_height + ) + list_model_info.append(model_info) + + return list_model_info, data + +def download_data_by_json(model_info, workdir, oss_client ): + try: + pid = model_info.pid + model_height = model_info.model_height + # target_dir = f"{workdir}/{pid}_image" + target_dir = f"{workdir}" + + url = f"https://mp.api.suwa3d.com/api/order/getOssSuffixByOrderId?order_id={model_info.order_id}" + res = requests.get(url) + + data = res.json()["data"] + # print("datas=",data) + data = data.replace("/init_obj", "") + + print("target_dir=", target_dir) + download_textures = DataTransfer(target_dir, f"objs/download/print/{pid}/{data}/{model_height}/", oss_client) + + download_textures.download_data_rename_json(model_info) + # 下载后检查目标文件夹是否为空 + if os.path.exists(target_dir) and not os.listdir(target_dir): + shutil.rmtree(target_dir) + print(f"下载后检查发现目标文件夹为空,已删除: {target_dir}") + except Exception as e: + print(f"卡通图片下载失败: {pid}, 错误: {str(e)}") + pass + +def get_oss_client(cfg_path): + with open(os.path.expanduser(cfg_path), "r") as config: + cfg = yaml.safe_load(config) + + AccessKeyId_down = cfg["run"]["down"]["AccessKeyId"] + AccessKeySecret_down = cfg["run"]["down"]["AccessKeySecret"] + Endpoint_down = cfg["run"]["down"]["Endpoint"] + Bucket_down = cfg["run"]["down"]["Bucket"] + + oss_client = oss2.Bucket( + oss2.Auth(AccessKeyId_down, AccessKeySecret_down), Endpoint_down, Bucket_down + ) + + return oss_client + +def download_datas_by_json(pid_file, workdir, oss_config): + oss_client = get_oss_client(oss_config) + + # json_path = os.path.join(workdir, "3DPrintLayout.json") + json_path = os.path.join(workdir, f"{pid_file}.json") + + # 读取所有PID + list_model_info, data = read_pids_from_json(json_path) + print(f"从文件读取了 {len(list_model_info)} 个PID") + # 批量下载 + for model_info in list_model_info: + print(f"开始下载PID: {model_info}") + download_data_by_json(model_info, args.workdir, oss_client) + + return data + +def download_transform_save_by_json(pid_file, workdir, oss_config): + layout_data = download_datas_by_json(pid_file, workdir, oss_config) + + original_obj_pid_dir = workdir + cache_type_setting_dir = os.path.join(workdir, "arrange") + Path(cache_type_setting_dir).mkdir(exist_ok=True) + + 
print(f"original_obj_pid_dir={original_obj_pid_dir}, cache_type_setting_dir={cache_type_setting_dir}") + + transform_save(layout_data, original_obj_pid_dir, cache_type_setting_dir) + +def transform_save(layout_data, original_obj_pid_dir, cache_type_setting_dir): + meshes = [] + # 小打印机380*345,需要偏移-380,-345 + need_offset = True + for model in layout_data["models"]: + transform = model.get('transform', {}) + + homo_matrix = transform["homo_matrix"] # 获取存储的列表 + reconstructed_matrix = np.array(homo_matrix, dtype=np.float64) + + obj_name = model.get('file_name', '') + obj_path = os.path.join(original_obj_pid_dir, obj_name) + # 加载网格 + try: + mesh = o3d.io.read_triangle_mesh(obj_path, enable_post_processing=True) + if not mesh.has_vertices(): + print(f"警告: 网格无有效顶点 - {obj_path}") + continue + except Exception as e: + print(f"加载模型失败: {obj_path} - {e}") + continue + + original_vertices = np.asarray(mesh.vertices) + + transformed_vertices = custom_mesh_transform(original_vertices, reconstructed_matrix) + # 如果 need_offset 为 True,应用额外的偏移 + if need_offset: + # 应用偏移 (-380, -345, 0) + offset = np.array([-380, -345, 0]) + transformed_vertices += offset + print(f"已对模型 {obj_name} 应用偏移: {offset}") + + mesh.vertices = o3d.utility.Vector3dVector(transformed_vertices) + + meshes.append(mesh) + + # obj_path_arrange = os.path.join(original_obj_pid_dir, "arrange") + obj_path_arrange = cache_type_setting_dir + if not os.path.exists(obj_path_arrange): + os.mkdir(obj_path_arrange) + obj_path_arrange_obj = os.path.join(obj_path_arrange, obj_name) + print("obj_path_arrange_obj", obj_path_arrange_obj) + mesh.compute_vertex_normals() + o3d.io.write_triangle_mesh(obj_path_arrange_obj, mesh,write_triangle_uvs=True) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument("--pid_file", type=str, required=True, help="批次号, 也是json文件名") + parser.add_argument("--workdir", type=str, required=True, help="本代码文件所在的目录") + parser.add_argument("--oss_config", type=str, required=True) + args = parser.parse_args() + + download_transform_save_by_json(args.pid_file, args.workdir, args.oss_config) + \ No newline at end of file diff --git a/get_lowest_position_of_center_ext.py b/get_lowest_position_of_center_ext.py new file mode 100644 index 0000000..f64a5b2 --- /dev/null +++ b/get_lowest_position_of_center_ext.py @@ -0,0 +1,847 @@ +import open3d as o3d +import numpy as np +import copy +import time +from get_lowest_position_of_z_out import get_lowest_position_of_z_out + +# 对外部提供的获取最低z的接口 +def get_lowest_position_of_center_out2(obj_path): + + total_matrix = np.eye(4) + + mesh_obj = o3d.io.read_triangle_mesh(obj_path) + + voxel_size = 3 + + return get_lowest_position_of_center(mesh_obj, obj_path, total_matrix, voxel_size) + +def calculate_rotation_and_center_of_mass(angle_x, angle_y, angle_z, points): + """计算某一组旋转角度后的重心""" + # 计算绕X轴、Y轴和Z轴的旋转矩阵 + R_x = np.array([ + [1, 0, 0], + [0, np.cos(np.radians(angle_x)), -np.sin(np.radians(angle_x))], + [0, np.sin(np.radians(angle_x)), np.cos(np.radians(angle_x))] + ]) + + R_y = np.array([ + [np.cos(np.radians(angle_y)), 0, np.sin(np.radians(angle_y))], + [0, 1, 0], + [-np.sin(np.radians(angle_y)), 0, np.cos(np.radians(angle_y))] + ]) + + R_z = np.array([ + [np.cos(np.radians(angle_z)), -np.sin(np.radians(angle_z)), 0], + [np.sin(np.radians(angle_z)), np.cos(np.radians(angle_z)), 0], + [0, 0, 1] + ]) + + # 综合旋转矩阵 + R = R_z @ R_y @ R_x + + # 执行旋转 + rotated_points = points @ R.T + + # 计算最小z值 + min_z = np.min(rotated_points[:, 2]) + + # 计算平移向量,将最小Z值平移到0 + translation_vector = 
np.array([0, 0, -min_z]) + rotated_points += translation_vector + + # 计算重心 + center_of_mass = np.mean(rotated_points, axis=0) + + return center_of_mass[2], angle_x, angle_y, angle_z + +def parallel_rotation4(points, angle_step=4): + """仅绕 Y 轴旋转(假设 X/Z 轴不影响目标函数)""" + min_center = float('inf') + + for angle_x in range(-45, 45, angle_step): + for angle_y in range(0, 360, angle_step): + center_z, ax, ay, _ = calculate_rotation_and_center_of_mass(angle_x, angle_y, 0, points) + if center_z < min_center: + min_center = center_z + best_angle_x = ax + best_angle_y = ay + + return (best_angle_x, best_angle_y, 0, min_center) + +import numpy as np +from numba import cuda +import math + +# CUDA核函数:计算所有旋转角度下的重心高度 +@cuda.jit +def compute_centers_kernel(points, centers, angle_x_start, angle_x_step, angle_y_start, angle_y_step, num_x, num_y): + i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x + j = cuda.blockIdx.y * cuda.blockDim.y + cuda.threadIdx.y + + if i >= num_x or j >= num_y: + return + + # 获取整数角度值 + angle_x = angle_x_start + i * angle_x_step + angle_y = angle_y_start + j * angle_y_step + + rx = math.radians(float(angle_x)) # 使用 float() 进行转换 + ry = math.radians(float(angle_y)) + rz = 0.0 + + # 计算旋转矩阵 + cos_x = math.cos(rx) + sin_x = math.sin(rx) + cos_y = math.cos(ry) + sin_y = math.sin(ry) + cos_z = math.cos(rz) + sin_z = math.sin(rz) + + # 旋转矩阵: R = R_z * R_y * R_x + R00 = cos_z * cos_y + R01 = cos_z * sin_y * sin_x - sin_z * cos_x + R02 = cos_z * sin_y * cos_x + sin_z * sin_x + R10 = sin_z * cos_y + R11 = sin_z * sin_y * sin_x + cos_z * cos_x + R12 = sin_z * sin_y * cos_x - cos_z * sin_x + R20 = -sin_y + R21 = cos_y * sin_x + R22 = cos_y * cos_x + + n = points.shape[0] + min_z = 1e10 + + # 第一遍:计算最小Z值 + for k in range(n): + x = points[k, 0] + y = points[k, 1] + z = points[k, 2] + + x_rot = R00 * x + R01 * y + R02 * z + y_rot = R10 * x + R11 * y + R12 * z + z_rot = R20 * x + R21 * y + R22 * z + + if z_rot < min_z: + min_z = z_rot + + total_z = 0.0 + # 第二遍:平移并计算Z坐标和 + for k in range(n): + x = points[k, 0] + y = points[k, 1] + z = points[k, 2] + + x_rot = R00 * x + R01 * y + R02 * z + y_rot = R10 * x + R11 * y + R12 * z + z_rot = R20 * x + R21 * y + R22 * z + + z_trans = z_rot - min_z + total_z += z_trans + + center_z = total_z / n + centers[i, j] = center_z + +# CUDA版本的并行旋转计算 +def parallel_rotation4_cuda(points, angle_step=4): + angle_x_start = -45 + angle_x_end = 45 + angle_y_start = 0 + angle_y_end = 360 + + num_x = int((angle_x_end - angle_x_start) / angle_step) + num_y = int((angle_y_end - angle_y_start) / angle_step) + + # 将点云数据复制到GPU + d_points = cuda.to_device(points.astype(np.float32)) + d_centers = cuda.device_array((num_x, num_y), dtype=np.float32) + + # 配置线程块和网格 + threadsperblock = (16, 16) + blockspergrid_x = (num_x + threadsperblock[0] - 1) // threadsperblock[0] + blockspergrid_y = (num_y + threadsperblock[1] - 1) // threadsperblock[1] + blockspergrid = (blockspergrid_x, blockspergrid_y) + + # 启动核函数 + compute_centers_kernel[blockspergrid, threadsperblock]( + d_points, d_centers, angle_x_start, angle_step, angle_y_start, angle_step, num_x, num_y + ) + + # 将结果复制回主机 + centers = d_centers.copy_to_host() + + # 找到最小重心值的索引 + min_index = np.argmin(centers) + i = min_index // num_y + j = min_index % num_y + + best_angle_x = angle_x_start + i * angle_step + best_angle_y = angle_y_start + j * angle_step + min_center = centers[i, j] + + return best_angle_x, best_angle_y, 0, min_center + +def read_mesh(obj_path, simple=True): + mesh_obj = o3d.io.read_triangle_mesh(obj_path) + return mesh_obj 
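+
+# (editor's note, not part of the original patch) compute_mesh_center2 below sums the
+# coordinates in a plain Python loop, while compute_mesh_center uses the vectorized
+# np.mean; both return the same centroid. A quick self-check, assuming NumPy only:
+#
+#     pts = np.random.rand(1000, 3)
+#     assert np.allclose(compute_mesh_center2(pts), compute_mesh_center(pts))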
+ +def compute_mesh_center2(vertices): + """ + 计算网格质心 + + 参数: + vertices: 顶点坐标数组,形状为(N, 3)的NumPy数组或列表 + + 返回: + centroid: 质心坐标的NumPy数组 [x, y, z] + """ + if len(vertices) == 0: + raise ValueError("顶点数组不能为空") + + n = len(vertices) # 顶点数量 + # 初始化坐标累加器 + sum_x, sum_y, sum_z = 0.0, 0.0, 0.0 + + start_time1 = time.time() + # 遍历所有顶点累加坐标值 + for vertex in vertices: + sum_x += vertex[0] + sum_y += vertex[1] + sum_z += vertex[2] + print("compute_mesh_center1 time", time.time()-start_time1) + + # 计算各坐标轴的平均值 + centroid = np.array([sum_x / n, sum_y / n, sum_z / n]) + return centroid + +def compute_mesh_center(vertices): + """ + 计算网格质心(优化版) + + 参数: + vertices: 顶点坐标数组,形状为(N, 3)的NumPy数组或列表 + + 返回: + centroid: 质心坐标的NumPy数组 [x, y, z] + """ + if len(vertices) == 0: + raise ValueError("顶点数组不能为空") + + # 确保vertices是NumPy数组 + vertices_np = np.asarray(vertices) + + # 使用NumPy的mean函数直接计算均值(向量化操作) + centroid = np.mean(vertices_np, axis=0) + + return centroid + +def get_lowest_position_of_center_ext3(mesh_obj, obj_path, voxel_size = 3): + + best_angle_x, best_angle_y, best_angle_z, z_mean_min, pcd_transformed= get_lowest_position_of_center2(mesh_obj, obj_path, voxel_size) + + return best_angle_x, best_angle_y, best_angle_z, z_mean_min + + +def get_lowest_position_of_center_ext2(mesh_obj, obj_path, total_matrix, voxel_size): + + # total_matrix, z_mean_min = get_lowest_position_of_center(obj_path, total_matrix, voxel_size) + temp_matrix, z_mean_min = get_lowest_position_of_center(mesh_obj, obj_path, np.eye(4), voxel_size) + # print("temp_matrix=",temp_matrix,voxel_size,mesh_obj) + + total_matrix = temp_matrix @ total_matrix + + return total_matrix, z_mean_min + +def get_lowest_position_of_center_ext(obj_path, total_matrix): + + temp_matrix, z_max = get_lowest_position_of_z_out(obj_path) + + total_matrix = temp_matrix @ total_matrix + + return total_matrix, z_max + +def down_sample(pcd, voxel_size, farthest_sample = False): + original_num = len(pcd.points) + target_samples = 1500 # 1000 + num_samples = min(target_samples, original_num) + + # 第一步:使用体素下采样快速减少点数量 + # voxel_size = 3 + if farthest_sample: + pcd_voxel = pcd.farthest_point_down_sample(num_samples=num_samples) + else: + pcd_voxel = pcd.voxel_down_sample(voxel_size) + down_num = len(pcd_voxel.points) + # print(f"original_num={original_num}, down_num={down_num}") + + # 第二步:仅在必要时进行最远点下采样 + if len(pcd_voxel.points) > target_samples and False: + pcd_downsampled = pcd_voxel.farthest_point_down_sample(num_samples=num_samples) + else: + pcd_downsampled = pcd_voxel + + return pcd_downsampled + +def get_lowest_position_of_center(mesh_obj, obj_path, total_matrix, voxel_size): + + # print(f"obj_path={obj_path}, get_lowest_position_of_center voxel_size={voxel_size}") + start_time1 = time.time() + + vertices = np.asarray(mesh_obj.vertices) + + # 确保网格有顶点 + if len(vertices) == 0: + # raise ValueError(f"Mesh has no vertices: {obj_path}") + print(f"Warning: Mesh has no vertices: {mesh_obj}") + return None + + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(vertices) + + # print("voxel_size",voxel_size,obj_path, len(pcd.points), len(mesh_obj.vertices)) + + # 对点云进行下采样(体素网格法) + #""" + pcd_downsampled = down_sample(pcd, voxel_size) + pcd_downsampled.paint_uniform_color([0, 0, 1]) + + if len(np.asarray(pcd_downsampled.points)) <= 0: + bbox = pcd.get_axis_aligned_bounding_box() + volume = bbox.volume() + + # print(f"len(pcd.points)={len(pcd.points)}, volume={volume}") + + # 处理体积为零的情况 + if volume <= 0: + # 计算点云的实际范围 + points = np.asarray(pcd.points) + if 
len(points) > 0: + min_bound = np.min(points, axis=0) + max_bound = np.max(points, axis=0) + extent = max_bound - min_bound + + # 确保最小维度至少为0.01 + min_dimension = max(0.01, np.min(extent)) + volume = min_dimension ** 3 + else: + volume = 1.0 # 最后的安全回退 + + print(f"Warning: Zero volume detected, using approximated volume {volume:.6f} for {obj_path}") + + # 安全计算密度 - 防止除零错误 + if len(pcd.points) > 0 and volume > 0: + original_density = len(pcd.points) / volume + voxel_size = max(0.01, min(10.0, 0.5 / (max(1e-6, original_density) ** 0.33))) + else: + # 当点数为0或体积为0时使用默认体素大小 + voxel_size = 1.0 # 默认值 + + print(f"Recalculated voxel_size: {voxel_size} for {obj_path}") + + pcd_downsampled = down_sample(pcd, voxel_size) + pcd_downsampled.paint_uniform_color([0, 0, 1]) + + original_num = len(pcd.points) + target_samples = 1000 + num_samples = min(target_samples, original_num) + + # print("get_lowest_position_of_center1 time", time.time()-start_time1) + start_time2 = time.time() + # 确保下采样后有点云 + if len(np.asarray(pcd_downsampled.points)) == 0: + # 使用原始点云作为后备 + pcd_downsampled = pcd + print(f"Warning: Using original point cloud for {obj_path} as downsampling produced no points") + + points = np.asarray(pcd_downsampled.points) + + # 初始化最小重心Y的值 + min_center_of_mass_y = float('inf') + best_angle_x, best_angle_y, best_angle_z = 0, 0, 0 + best_angle_x, best_angle_y, best_angle_z, min_center_of_mass_y = parallel_rotation4(points, angle_step=3) + + # 使用最佳角度进行旋转并平移obj + pcd_transformed = copy.deepcopy(mesh_obj) + + + # 最佳角度旋转 + R_x = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([1, 0, 0]) * np.radians(best_angle_x)) + pcd_transformed.rotate(R_x) + R_y = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * np.radians(best_angle_y)) + pcd_transformed.rotate(R_y) + R_z = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 0, 1]) * np.radians(best_angle_z)) + pcd_transformed.rotate(R_z) + + T_x = np.eye(4) + T_x[:3, :3] = R_x + center_point = compute_mesh_center(mesh_obj.vertices) + T_center_to_origin = np.eye(4) + T_center_to_origin[:3, 3] = -center_point + T_origin_to_center = np.eye(4) + T_origin_to_center[:3, 3] = center_point + T_rot_center = T_origin_to_center @ T_x @ T_center_to_origin + total_matrix = T_rot_center @ total_matrix + + T_y = np.eye(4) + T_y[:3, :3] = R_y + center_point = compute_mesh_center(mesh_obj.vertices) + T_center_to_origin = np.eye(4) + T_center_to_origin[:3, 3] = -center_point + T_origin_to_center = np.eye(4) + T_origin_to_center[:3, 3] = center_point + T_rot_center = T_origin_to_center @ T_y @ T_center_to_origin + total_matrix = T_rot_center @ total_matrix + + T_z = np.eye(4) + T_z[:3, :3] = R_z + center_point = compute_mesh_center(mesh_obj.vertices) + T_center_to_origin = np.eye(4) + T_center_to_origin[:3, 3] = -center_point + T_origin_to_center = np.eye(4) + T_origin_to_center[:3, 3] = center_point + T_rot_center = T_origin_to_center @ T_z @ T_center_to_origin + total_matrix = T_rot_center @ total_matrix + + #试着旋转180,让脸朝上 + + vertices = np.asarray(pcd_transformed.vertices) + # 计算平移向量,将最小Y值平移到0 + min_z = np.min(vertices[:, 2]) + translation_vector = np.array([0,0,-min_z,]) + pcd_transformed.translate(translation_vector) + + T_trans1 = np.eye(4) + T_trans1[:3, 3] = translation_vector + total_matrix = T_trans1 @ total_matrix + + # 计算 z 坐标均值 + vertices = np.asarray(pcd_transformed.vertices) + z_mean1 = np.mean(vertices[:, 2]) + z_max1 = np.max(vertices[:, 2]) + + angle_rad = np.pi + #print("旋转前质心:", pcd_transformed.get_center()) + 
#print("旋转前点示例:", np.asarray(pcd_transformed.vertices)[:3]) + R_y = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * angle_rad) + centroid = pcd_transformed.get_center() + pcd_transformed.translate(-center_point) + pcd_transformed.rotate(R_y, center=(0, 0, 0)) + pcd_transformed.translate(center_point) + + aabb = pcd_transformed.get_axis_aligned_bounding_box() + # center_point = aabb.get_center() + center_point = compute_mesh_center(mesh_obj.vertices) + # 构建绕中心点旋转的变换矩阵[3](@ref) + T_center_to_origin = np.eye(4) + T_center_to_origin[:3, 3] = -center_point + R_y180 = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * angle_rad) + T_rotate = np.eye(4) + T_rotate[:3, :3] = R_y180 + T_origin_to_center = np.eye(4) + T_origin_to_center[:3, 3] = center_point + T_rot_center = T_origin_to_center @ T_rotate @ T_center_to_origin + total_matrix = T_rot_center @ total_matrix + + #print("旋转后质心:", pcd_transformed.get_center()) + #print("旋转后点示例:", np.asarray(pcd_transformed.vertices)[:3]) + + # + vertices = np.asarray(pcd_transformed.vertices) + # 计算平移向量,将最小Y值平移到0 + min_z = np.min(vertices[:, 2]) + max_z = np.max(vertices[:, 2]) + # print("min_z1", min_z, obj_path) + translation_vector = np.array([0,0,-min_z,]) + # translation_vector = np.array([0,0,-min_z + (min_z-max_z),]) + # print("translation_vector1",translation_vector) + pcd_transformed.translate(translation_vector) + + T_trans2 = np.eye(4) + T_trans2[:3, 3] = translation_vector + translation = total_matrix[:3, 3] + # print("translation_vector2",translation_vector) + # print(1,translation) + + total_matrix = T_trans2 @ total_matrix + translation = total_matrix[:3, 3] + # print(2,translation) + + # 计算 z 坐标均值 + vertices = np.asarray(pcd_transformed.vertices) + z_mean2 = np.mean(vertices[:, 2]) + z_max2 = np.max(vertices[:, 2]) + + # print(f"get_lowest_position_of_center z_max1={z_max1}, z_max2={z_max2}, len={len(pcd_transformed.vertices)}, obj_path={obj_path}") + + if (z_mean2 > z_mean1): + # if (z_max2 > z_max1): + R_y = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * -angle_rad) + centroid = pcd_transformed.get_center() + + aabb = pcd_transformed.get_axis_aligned_bounding_box() + # center_point = aabb.get_center() + center_point = compute_mesh_center(mesh_obj.vertices) + + pcd_transformed.translate(-center_point) + pcd_transformed.rotate(R_y, center=(0, 0, 0)) + pcd_transformed.translate(center_point) + + T_center_to_origin = np.eye(4) + T_center_to_origin[:3, 3] = -center_point + T_origin_to_center = np.eye(4) + T_origin_to_center[:3, 3] = center_point + # 构建反向旋转矩阵 + R_y = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * -angle_rad) + T_rotate_inv = np.eye(4) + T_rotate_inv[:3, :3] = R_y + # 完整的反向绕中心旋转矩阵 + T_rot_center_inv = T_origin_to_center @ T_rotate_inv @ T_center_to_origin + total_matrix = T_rot_center_inv @ total_matrix + + vertices = np.asarray(pcd_transformed.vertices) + # 计算平移向量,将最小Y值平移到0 + min_z = np.min(vertices[:, 2]) + # print("min_z2", min_z, obj_path) + translation_vector = np.array([0,0,-min_z,]) + pcd_transformed.translate(translation_vector) + + T_trans3 = np.eye(4) + T_trans3[:3, 3] = translation_vector + total_matrix = T_trans3 @ total_matrix + + # z_mean_min = min(z_mean1, z_mean2) + z_max_min = min(z_max1, z_max2) + + # print("get_lowest_position_of_center2 time", time.time()-start_time2) + return total_matrix, z_max_min + +import requests +import json +import re + +def is_valid_float_string(s): + # 匹配科学计数法或普通小数,允许开头和末尾的空格 + 
pattern = r'^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?$' + return bool(re.match(pattern, s.strip())) + +def safe_convert_to_float(s): + """ + 尝试将字符串安全转换为float,处理一些不完整情况。 + """ + s = s.strip().lower() # 去除空格,统一为小写 + + # 处理空字符串或完全非数字的情况 + if not s or s in ['na', 'nan', 'inf', 'null', 'none']: + return None # 或者可以根据需要返回 float('nan') + + # 检查是否为有效的浮点数字符串 + if is_valid_float_string(s): + return float(s) + + # 处理类似 '0.00000000e' 的情况(缺少指数) + if s.endswith('e'): + # 尝试添加指数 '0' -> 'e0' + try: + return float(s + '0') + except ValueError: + pass # 如果添加'e0'后仍然失败,则继续下面的异常处理 + + # 更激进的清理:移除非数字、小数点、负号和指数e以外的所有字符 + # 注意:这可能破坏有特定格式的字符串,慎用 + cleaned_s = re.sub(r'[^\d\.eE-]', '', s) + try: + return float(cleaned_s) + except ValueError: + pass + + # 如果所有尝试都失败,返回None或抛出异常 + return None + +def string_to_matrix(data_string): + """ + 将字符串转换为NumPy浮点数矩阵,并处理可能的转换错误。 + """ + # 分割字符串 + lines = data_string.strip().split(';') + matrix = [] + + for line in lines: + num_list = line.split() + float_row = [] + for num in num_list: + # 使用安全转换函数 + value = safe_convert_to_float(num) + if value is None: + # 处理转换失败,例如记录日志、使用NaN代替 + print(f"警告: 无法转换字符串 '{num}',将其替换为NaN。") + value = np.nan # 用NaN标记缺失或无效值 + float_row.append(value) + matrix.append(float_row) + + return np.array(matrix) + +import ast + +def get_lowest_position_of_center_net(printId, total_matrix): + print("get_lowest_position_of_center_net", printId) + + url = f"https://mp.api.suwa3d.com/api/printOrder/infoByPrintId?printId={printId}" + res = requests.get(url) + + datas = res.json()["data"]["layout"] + print("datas=", datas) + + homo_matrix_str = datas.get("homo_matrix") + print("homo_matrix_str=", homo_matrix_str) + + # 1. 去除字符串首尾的方括号 + str_cleaned = homo_matrix_str.strip('[]') + # 2. 按行分割字符串 + rows = str_cleaned.split('\n') + + # 3. 修复:处理每行中的逗号问题 + matrix_list = [] + for row in rows: + if row.strip() == '': + continue + + # 去除行首尾的方括号和空格 + row_cleaned = row.strip(' []') + # 按逗号分割,但过滤掉空字符串 + elements = [elem.strip() for elem in row_cleaned.split(',') if elem.strip() != ''] + + # 进一步清理每个元素:去除可能残留的逗号和方括号 + cleaned_elements = [] + for elem in elements: + # 去除元素中可能存在的逗号、方括号和空格 + elem_cleaned = elem.strip(' ,[]') + if elem_cleaned != '': + cleaned_elements.append(elem_cleaned) + + if cleaned_elements: # 只添加非空行 + matrix_list.append(cleaned_elements) + + print("matrix_list=", matrix_list) + + # 4. 
安全地转换为浮点数数组(带错误处理) + try: + reconstructed_matrix = np.array(matrix_list, dtype=float) + except ValueError as e: + print(f"转换矩阵时出错: {e}") + print("尝试逐个元素转换...") + + # 逐个元素转换,便于定位问题元素 + float_matrix = [] + for i, row in enumerate(matrix_list): + float_row = [] + for j, elem in enumerate(row): + try: + # 再次清理元素并转换 + cleaned_elem = elem.strip(' ,') + float_val = float(cleaned_elem) + float_row.append(float_val) + except ValueError as ve: + print(f"无法转换的元素: 行{i}, 列{j}, 值'{elem}', 错误: {ve}") + # 可以选择设置为0或NaN,或者抛出异常 + float_row.append(0.0) # 或者 np.nan + float_matrix.append(float_row) + + reconstructed_matrix = np.array(float_matrix, dtype=float) + + layout_z = datas.get("layout_z", 0) + print("layout_z", layout_z) + + reconstructed_matrix = reconstructed_matrix @ total_matrix + print("reconstructed_matrix=", reconstructed_matrix) + + return reconstructed_matrix, layout_z + +def get_lowest_position_of_center2(mesh_obj, obj_path, voxel_size = 3): + + # mesh_obj = read_mesh(obj_path) + + vertices = np.asarray(mesh_obj.vertices) + + # 确保网格有顶点 + if len(vertices) == 0: + print(f"Warning: Mesh has no vertices: {obj_path}") + return None + + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(vertices) + + # 对点云进行下采样(体素网格法 + pcd_downsampled = down_sample(pcd, voxel_size) + pcd_downsampled.paint_uniform_color([0, 0, 1]) + + if len(np.asarray(pcd_downsampled.points)) <= 0: + bbox = pcd.get_axis_aligned_bounding_box() + volume = bbox.volume() + + # print(f"len(pcd.points)={len(pcd.points)}, volume={volume}") + + # 处理体积为零的情况 + if volume <= 0: + # 计算点云的实际范围 + points = np.asarray(pcd.points) + if len(points) > 0: + min_bound = np.min(points, axis=0) + max_bound = np.max(points, axis=0) + extent = max_bound - min_bound + + # 确保最小维度至少为0.01 + min_dimension = max(0.01, np.min(extent)) + volume = min_dimension ** 3 + else: + volume = 1.0 # 最后的安全回退 + + print(f"Warning: Zero volume detected, using approximated volume {volume:.6f} for {obj_path}") + + # 安全计算密度 - 防止除零错误 + if len(pcd.points) > 0 and volume > 0: + original_density = len(pcd.points) / volume + voxel_size = max(0.01, min(10.0, 0.5 / (max(1e-6, original_density) ** 0.33))) + else: + # 当点数为0或体积为0时使用默认体素大小 + voxel_size = 1.0 # 默认值 + + print(f"Recalculated voxel_size: {voxel_size} for {obj_path}") + + pcd_downsampled = down_sample(pcd, voxel_size) + pcd_downsampled.paint_uniform_color([0, 0, 1]) + + # 确保下采样后有点云 + if len(np.asarray(pcd_downsampled.points)) == 0: + # 使用原始点云作为后备 + pcd_downsampled = pcd + print(f"Warning: Using original point cloud for {obj_path} as downsampling produced no points") + + points = np.asarray(pcd_downsampled.points) + + # 初始化最小重心Y的值 + best_angle_x, best_angle_y, best_angle_z = 0, 0, 0 + # 旋转并计算最优角度:绕X、Y、Z轴进行每度的旋转 + best_angle_x, best_angle_y, best_angle_z, min_center_of_mass_y = parallel_rotation4(points, angle_step=3) + # print("best_angle1", best_angle_x, best_angle_y, best_angle_z) + + # 使用最佳角度进行旋转并平移obj + pcd_transformed = copy.deepcopy(mesh_obj) + + # 最佳角度旋转 + """ + R_x = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([1, 0, 0]) * np.radians(best_angle_x)) + pcd_transformed.rotate(R_x) + R_y = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * np.radians(best_angle_y)) + pcd_transformed.rotate(R_y) + R_z = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 0, 1]) * np.radians(best_angle_z)) + pcd_transformed.rotate(R_z) + #""" + + aabb = pcd_transformed.get_axis_aligned_bounding_box() + center_point = aabb.get_center() + + vertices = 
np.asarray(pcd_transformed.vertices) + # 计算平移向量,将最小Y值平移到0 + min_z = np.min(vertices[:, 2]) + translation_vector = np.array([0,0,-min_z,]) + pcd_transformed.translate(translation_vector) + + # 计算 z 坐标均值 + vertices = np.asarray(pcd_transformed.vertices) + z_mean1 = np.mean(vertices[:, 2]) + + angle_rad = np.pi + + R_y = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * angle_rad) + + pcd_transformed.translate(-center_point) + pcd_transformed.rotate(R_y, center=(0, 0, 0)) + pcd_transformed.translate(center_point) + best_angle_y += angle_rad / np.pi * 180 + + vertices = np.asarray(pcd_transformed.vertices) + # 计算平移向量,将最小Y值平移到0 + min_z = np.min(vertices[:, 2]) + translation_vector = np.array([0,0,-min_z,]) + pcd_transformed.translate(translation_vector) + + # 计算 z 坐标均值 + vertices = np.asarray(pcd_transformed.vertices) + z_mean2 = np.mean(vertices[:, 2]) + + # print("z_mean",z_mean1,z_mean2,len(pcd_transformed.vertices),obj_path) + + if z_mean2 > z_mean1: + R_y = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * -angle_rad) + + aabb = pcd_transformed.get_axis_aligned_bounding_box() + center_point = aabb.get_center() + + pcd_transformed.translate(-center_point) + pcd_transformed.rotate(R_y, center=(0, 0, 0)) + pcd_transformed.translate(center_point) + best_angle_y += -angle_rad / np.pi * 180 + + vertices = np.asarray(pcd_transformed.vertices) + # 计算平移向量,将最小Y值平移到0 + min_z = np.min(vertices[:, 2]) + translation_vector = np.array([0,0,-min_z,]) + pcd_transformed.translate(translation_vector) + + z_mean_min = min(z_mean1, z_mean2) + + angle_z_delta = arrange_box_correctly(pcd_transformed, voxel_size) + best_angle_z += angle_z_delta + # print("angle_z_delta", angle_z_delta, best_angle_z) + + return best_angle_x, best_angle_y, best_angle_z, z_mean_min, pcd_transformed + +def arrange_box_correctly(obj_transformed, voxel_size): + + vertices = np.asarray(obj_transformed.vertices) + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(vertices) + + # 降采样与特征计算 + pcd_downsampled = down_sample(pcd, voxel_size) + + original_num = len(pcd.points) + target_samples = 1000 + num_samples = min(target_samples, original_num) + + points = np.asarray(pcd_downsampled.points) + cov = np.cov(points.T) + + center = obj_transformed.get_center() + + # 特征分解与方向约束(关键修改点) + eigen_vals, eigen_vecs = np.linalg.eigh(cov) + max_axis = eigen_vecs[:, np.argmax(eigen_vals)] + + # 强制主方向向量X分量为正(指向右侧) + if max_axis[0] < 0 or (max_axis[0] == 0 and max_axis[1] < 0): + max_axis = -max_axis + + target_dir = np.array([1, 0]) # 目标方向为X正轴 + current_dir = max_axis[:2] / np.linalg.norm(max_axis[:2]) + dot_product = np.dot(current_dir, target_dir) + + # print("dot_product", dot_product) + if dot_product < 0.8: # 阈值控制方向敏感性(建议0.6~0.9) + max_axis = -max_axis # 强制翻转方向 + + # 计算旋转角度 + angle_z = np.arctan2(max_axis[1], max_axis[0]) % (2 * np.pi) + + if max_axis[0] <= 0 and max_axis[1] <= 0: + angle_z += np.pi + + # print("max_axis2", max_axis, -angle_z, np.rad2deg(-angle_z)) + + R = o3d.geometry.get_rotation_matrix_from_axis_angle([0, 0, -angle_z]) + + T = np.eye(4) + T[:3, :3] = R + T[:3, 3] = center - R.dot(center) # 保持中心不变 + obj_transformed.transform(T) + + return np.rad2deg(-angle_z) diff --git a/get_lowest_position_of_z_out.py b/get_lowest_position_of_z_out.py new file mode 100644 index 0000000..3de702f --- /dev/null +++ b/get_lowest_position_of_z_out.py @@ -0,0 +1,352 @@ +import open3d as o3d +import numpy as np +import copy +import time +import argparse + +""" +对外部提供的获取最低z的接口 
+get_lowest_position_of_z_out + +参数: + obj_path, 模型数据路径 + +返回: + total_matrix: 旋转矩阵 + z_max: Z最高点 +""" + +def get_lowest_position_of_z_out(obj_path): + + mesh_obj = o3d.io.read_triangle_mesh(obj_path) + + total_matrix = np.eye(4) + + voxel_size = 3 + + # print(f"obj_path={obj_path}, get_lowest_position_of_center voxel_size={voxel_size}") + start_time1 = time.time() + + vertices = np.asarray(mesh_obj.vertices) + + # 确保网格有顶点 + if len(vertices) == 0: + # raise ValueError(f"Mesh has no vertices: {obj_path}") + print(f"Warning: Mesh has no vertices: {mesh_obj}") + return None + + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(vertices) + + # print("voxel_size",voxel_size,obj_path, len(pcd.points), len(mesh_obj.vertices)) + + # 对点云进行下采样(体素网格法) + #""" + pcd_downsampled = down_sample(pcd, voxel_size) + pcd_downsampled.paint_uniform_color([0, 0, 1]) + + if len(np.asarray(pcd_downsampled.points)) <= 0: + bbox = pcd.get_axis_aligned_bounding_box() + volume = bbox.volume() + + # print(f"len(pcd.points)={len(pcd.points)}, volume={volume}") + + # 处理体积为零的情况 + if volume <= 0: + # 计算点云的实际范围 + points = np.asarray(pcd.points) + if len(points) > 0: + min_bound = np.min(points, axis=0) + max_bound = np.max(points, axis=0) + extent = max_bound - min_bound + + # 确保最小维度至少为0.01 + min_dimension = max(0.01, np.min(extent)) + volume = min_dimension ** 3 + else: + volume = 1.0 # 最后的安全回退 + + print(f"Warning: Zero volume detected, using approximated volume {volume:.6f} for {obj_path}") + + # 安全计算密度 - 防止除零错误 + if len(pcd.points) > 0 and volume > 0: + original_density = len(pcd.points) / volume + voxel_size = max(0.01, min(10.0, 0.5 / (max(1e-6, original_density) ** 0.33))) + else: + # 当点数为0或体积为0时使用默认体素大小 + voxel_size = 1.0 # 默认值 + + print(f"Recalculated voxel_size: {voxel_size} for {obj_path}") + + pcd_downsampled = down_sample(pcd, voxel_size) + pcd_downsampled.paint_uniform_color([0, 0, 1]) + + original_num = len(pcd.points) + target_samples = 1000 + num_samples = min(target_samples, original_num) + + # print("get_lowest_position_of_center1 time", time.time()-start_time1) + start_time2 = time.time() + # 确保下采样后有点云 + if len(np.asarray(pcd_downsampled.points)) == 0: + # 使用原始点云作为后备 + pcd_downsampled = pcd + print(f"Warning: Using original point cloud for {obj_path} as downsampling produced no points") + + points = np.asarray(pcd_downsampled.points) + + # 初始化最小重心Y的值 + max_z_of_mass_y = float('inf') + best_angle_x, best_angle_y, best_angle_z = 0, 0, 0 + best_angle_x, best_angle_y, best_angle_z, max_z_of_mass_y = parallel_rotation(points, angle_step=3) + + # 使用最佳角度进行旋转并平移obj + pcd_transformed = copy.deepcopy(mesh_obj) + + # 最佳角度旋转 + R_x = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([1, 0, 0]) * np.radians(best_angle_x)) + pcd_transformed.rotate(R_x) + R_y = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * np.radians(best_angle_y)) + pcd_transformed.rotate(R_y) + R_z = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 0, 1]) * np.radians(best_angle_z)) + pcd_transformed.rotate(R_z) + + T_x = np.eye(4) + T_x[:3, :3] = R_x + center_point = compute_mesh_center(mesh_obj.vertices) + T_center_to_origin = np.eye(4) + T_center_to_origin[:3, 3] = -center_point + T_origin_to_center = np.eye(4) + T_origin_to_center[:3, 3] = center_point + T_rot_center = T_origin_to_center @ T_x @ T_center_to_origin + total_matrix = T_rot_center @ total_matrix + + T_y = np.eye(4) + T_y[:3, :3] = R_y + center_point = compute_mesh_center(mesh_obj.vertices) + 
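+    # (editor's note) As with R_x above and R_z below, the rotation is applied about the
+    # mesh centroid: translate the centroid to the origin, rotate, then translate back,
+    # i.e. T_rot_center = T_origin_to_center @ T_y @ T_center_to_origin.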
T_center_to_origin = np.eye(4) + T_center_to_origin[:3, 3] = -center_point + T_origin_to_center = np.eye(4) + T_origin_to_center[:3, 3] = center_point + T_rot_center = T_origin_to_center @ T_y @ T_center_to_origin + total_matrix = T_rot_center @ total_matrix + + T_z = np.eye(4) + T_z[:3, :3] = R_z + center_point = compute_mesh_center(mesh_obj.vertices) + T_center_to_origin = np.eye(4) + T_center_to_origin[:3, 3] = -center_point + T_origin_to_center = np.eye(4) + T_origin_to_center[:3, 3] = center_point + T_rot_center = T_origin_to_center @ T_z @ T_center_to_origin + total_matrix = T_rot_center @ total_matrix + + #试着旋转180,让脸朝上 + + vertices = np.asarray(pcd_transformed.vertices) + # 计算平移向量,将最小Y值平移到0 + min_z = np.min(vertices[:, 2]) + translation_vector = np.array([0,0,-min_z,]) + pcd_transformed.translate(translation_vector) + + T_trans1 = np.eye(4) + T_trans1[:3, 3] = translation_vector + total_matrix = T_trans1 @ total_matrix + + # 计算 z 坐标均值 + vertices = np.asarray(pcd_transformed.vertices) + z_mean1 = np.mean(vertices[:, 2]) + z_max1 = np.max(vertices[:, 2]) + + angle_rad = np.pi + #print("旋转前质心:", pcd_transformed.get_center()) + #print("旋转前点示例:", np.asarray(pcd_transformed.vertices)[:3]) + R_y = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * angle_rad) + centroid = pcd_transformed.get_center() + pcd_transformed.translate(-center_point) + pcd_transformed.rotate(R_y, center=(0, 0, 0)) + pcd_transformed.translate(center_point) + + aabb = pcd_transformed.get_axis_aligned_bounding_box() + # center_point = aabb.get_center() + center_point = compute_mesh_center(mesh_obj.vertices) + # 构建绕中心点旋转的变换矩阵[3](@ref) + T_center_to_origin = np.eye(4) + T_center_to_origin[:3, 3] = -center_point + R_y180 = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * angle_rad) + T_rotate = np.eye(4) + T_rotate[:3, :3] = R_y180 + T_origin_to_center = np.eye(4) + T_origin_to_center[:3, 3] = center_point + T_rot_center = T_origin_to_center @ T_rotate @ T_center_to_origin + total_matrix = T_rot_center @ total_matrix + + #print("旋转后质心:", pcd_transformed.get_center()) + #print("旋转后点示例:", np.asarray(pcd_transformed.vertices)[:3]) + + # + vertices = np.asarray(pcd_transformed.vertices) + # 计算平移向量,将最小Y值平移到0 + min_z = np.min(vertices[:, 2]) + max_z = np.max(vertices[:, 2]) + # print("min_z1", min_z, obj_path) + translation_vector = np.array([0,0,-min_z,]) + # translation_vector = np.array([0,0,-min_z + (min_z-max_z),]) + # print("translation_vector1",translation_vector) + pcd_transformed.translate(translation_vector) + + T_trans2 = np.eye(4) + T_trans2[:3, 3] = translation_vector + translation = total_matrix[:3, 3] + # print("translation_vector2",translation_vector) + # print(1,translation) + + total_matrix = T_trans2 @ total_matrix + translation = total_matrix[:3, 3] + # print(2,translation) + + # 计算 z 坐标均值 + vertices = np.asarray(pcd_transformed.vertices) + z_mean2 = np.mean(vertices[:, 2]) + z_max2 = np.max(vertices[:, 2]) + + # print(f"get_lowest_position_of_center z_max1={z_max1}, z_max2={z_max2}, len={len(pcd_transformed.vertices)}, obj_path={obj_path}") + + if (z_mean2 > z_mean1): + # if (z_max2 > z_max1): + R_y = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * -angle_rad) + centroid = pcd_transformed.get_center() + + aabb = pcd_transformed.get_axis_aligned_bounding_box() + # center_point = aabb.get_center() + center_point = compute_mesh_center(mesh_obj.vertices) + + pcd_transformed.translate(-center_point) + pcd_transformed.rotate(R_y, 
center=(0, 0, 0)) + pcd_transformed.translate(center_point) + + T_center_to_origin = np.eye(4) + T_center_to_origin[:3, 3] = -center_point + T_origin_to_center = np.eye(4) + T_origin_to_center[:3, 3] = center_point + # 构建反向旋转矩阵 + R_y = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * -angle_rad) + T_rotate_inv = np.eye(4) + T_rotate_inv[:3, :3] = R_y + # 完整的反向绕中心旋转矩阵 + T_rot_center_inv = T_origin_to_center @ T_rotate_inv @ T_center_to_origin + total_matrix = T_rot_center_inv @ total_matrix + + vertices = np.asarray(pcd_transformed.vertices) + # 计算平移向量,将最小Y值平移到0 + min_z = np.min(vertices[:, 2]) + # print("min_z2", min_z, obj_path) + translation_vector = np.array([0,0,-min_z,]) + pcd_transformed.translate(translation_vector) + + T_trans3 = np.eye(4) + T_trans3[:3, 3] = translation_vector + total_matrix = T_trans3 @ total_matrix + + # z_mean_min = min(z_mean1, z_mean2) + z_max = min(z_max1, z_max2) + + # print("get_lowest_position_of_center2 time", time.time()-start_time2) + return total_matrix, z_max + +def calculate_rotation_and_top_of_mass(angle_x, angle_y, angle_z, points): + """计算某一组旋转角度后的重心""" + # 计算绕X轴、Y轴和Z轴的旋转矩阵 + R_x = np.array([ + [1, 0, 0], + [0, np.cos(np.radians(angle_x)), -np.sin(np.radians(angle_x))], + [0, np.sin(np.radians(angle_x)), np.cos(np.radians(angle_x))] + ]) + + R_y = np.array([ + [np.cos(np.radians(angle_y)), 0, np.sin(np.radians(angle_y))], + [0, 1, 0], + [-np.sin(np.radians(angle_y)), 0, np.cos(np.radians(angle_y))] + ]) + + R_z = np.array([ + [np.cos(np.radians(angle_z)), -np.sin(np.radians(angle_z)), 0], + [np.sin(np.radians(angle_z)), np.cos(np.radians(angle_z)), 0], + [0, 0, 1] + ]) + + # 综合旋转矩阵 + R = R_z @ R_y @ R_x + + # 执行旋转 + rotated_points = points @ R.T + + # 计算最小z值 + min_z = np.min(rotated_points[:, 2]) + + # 计算平移向量,将最小Z值平移到0 + translation_vector = np.array([0, 0, -min_z]) + rotated_points += translation_vector + + top_of_mass = np.max(rotated_points, axis=0) + + return top_of_mass[2], angle_x, angle_y, angle_z + +def parallel_rotation(points, angle_step=4): + """仅绕 Y 轴旋转(假设 X/Z 轴不影响目标函数)""" + max_top = float('inf') + + for angle_x in range(-90, 90, angle_step): + for angle_y in range(0, 360, angle_step): + max_z, ax, ay, _ = calculate_rotation_and_top_of_mass(angle_x, angle_y, 0, points) + if max_z < max_top: + max_top = max_z + best_angle_x = ax + best_angle_y = ay + + return (best_angle_x, best_angle_y, 0, max_top) + +def compute_mesh_center(vertices): + + if len(vertices) == 0: + raise ValueError("顶点数组不能为空") + + # 确保vertices是NumPy数组 + vertices_np = np.asarray(vertices) + + # 使用NumPy的mean函数直接计算均值(向量化操作) + centroid = np.mean(vertices_np, axis=0) + + return centroid + +def down_sample(pcd, voxel_size, farthest_sample = False): + original_num = len(pcd.points) + target_samples = 1500 # 1000 + num_samples = min(target_samples, original_num) + + # 第一步:使用体素下采样快速减少点数量 + # voxel_size = 3 + if farthest_sample: + pcd_voxel = pcd.farthest_point_down_sample(num_samples=num_samples) + else: + pcd_voxel = pcd.voxel_down_sample(voxel_size) + down_num = len(pcd_voxel.points) + # print(f"original_num={original_num}, down_num={down_num}") + + # 第二步:仅在必要时进行最远点下采样 + if len(pcd_voxel.points) > target_samples and False: + pcd_downsampled = pcd_voxel.farthest_point_down_sample(num_samples=num_samples) + else: + pcd_downsampled = pcd_voxel + + return pcd_downsampled + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument("--obj_path", type=str, required=True, help="batchobj_path_id") + args = parser.parse_args() + 
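+    # get_lowest_position_of_z_out returns (total_matrix, z_max): the accumulated
+    # 4x4 transform (rotation about the mesh centroid plus the translation that
+    # drops the model onto z = 0) and the reported top-of-model height. It returns
+    # None for an empty mesh, so the unpacking below assumes a valid, non-empty OBJ.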
+ obj_path = args.obj_path + max, z = get_lowest_position_of_z_out(obj_path) + diff --git a/grid_near_three.py b/grid_near_three.py new file mode 100644 index 0000000..9838f30 --- /dev/null +++ b/grid_near_three.py @@ -0,0 +1,179 @@ +import os +import time + +import open3d as o3d +import numpy as np + + + +def make_near_dict(base_dir,compact_dir): + """""" + + # 用于存储结果的字典 + results = {} + # 遍历目录中的所有 .ply 文件 + for ply_file in os.listdir(base_dir): + # 检查文件是否为 .ply 格式 + if ply_file.endswith('.ply'): + ply_path = os.path.join(base_dir, ply_file) + compact_ply_path = os.path.join(compact_dir, ply_file) + if os.path.exists(compact_ply_path): + ply_read_path = compact_ply_path + else: + ply_read_path = ply_path + # 读取点云 + pcd = o3d.io.read_point_cloud(ply_read_path) + + # 获取点云的点数据 + points = np.asarray(pcd.points) + + # 计算质心 + centroid = np.mean(points, axis=0) + + # 计算 Y 轴最小值 + min_y_value = np.min(points[:, 1]) # Y 轴最小值 + max_y_value = np.max(points[:, 1]) + + # 计算 X 轴最小值 + min_x_value = np.min(points[:, 0]) # X 轴最小值 + max_x_value = np.max(points[:, 0]) # X 轴最小值 + #ply_pid = ply_file.split("_")[0] + # 将结果存入字典 + results[ply_file] = { + "centroid": centroid, + "min_x_value": min_x_value, + "min_y_value": min_y_value, + "max_x_value": max_x_value, + "max_y_value": max_y_value, + + } + + # 打印结果 + # for ply_file, values in results.items(): + # print(f"文件: {ply_file}") + # print(f" 质心: {values['centroid']}") + # print(f" X 轴最小值: {values['min_x_value']}") + # print(f" Y 轴最小值: {values['min_y_value']}") + + # 计算每个ply需要触碰检测的 + check_touch_dict = {} + for ply_file in results.keys(): + print(ply_file) + #ply_pid = ply_file.split("_")[0] + #print(ply_pid) + bounds_min_x = results[ply_file]["min_x_value"] + bounds_min_y = results[ply_file]["min_y_value"] + #bounds_center = results[ply_file]["centroid"] + need_check_list = [] + need_values_dict = {} + for ply_file_near in results.keys(): + #print(ply_file_near) + if ply_file!= ply_file_near: + bounds_max_x = results[ply_file_near]["max_x_value"] + bounds_max_y = results[ply_file_near]["max_y_value"] + # if ply_file == "151140_9cm_x1=30.578+41.705+90.753.ply": + # print("-"*50) + # print("主::",ply_file) + # print("从::",ply_file_near) + # print(f"center_x{bounds_center[0]}") + # print(f"center_y{bounds_center[1]}") + # print(f"bounds_max_x{bounds_max_x}") + # print(f"bounds_max_y{bounds_max_y}") + # time.sleep(3) + # 235605_12cm_x1=33.774+30.837+120.344.ply + # if bounds_center[0]bounds_min_x and bounds_near_center[1]>bounds_min_y: + #print("-"*50) + # print(f"bounds_min_x{bounds_min_x}") + # print(f"bounds_max_x{bounds_max_x}") + + x_dis = bounds_min_x - bounds_max_x + y_dis = bounds_min_y - bounds_max_y + #print(f"x_dis=={x_dis}") + #print(f"y_dis=={y_dis}") + #if ply_file=="158040_15cm_x1=80.682+89.345+152.468.ply": + #if ply_file == "235547_4.8cm_x1=29.339+39.528+57.63.ply": + # print("主::",ply_file) + # print("从::",ply_file_near) + #if ply_file == "158040_15cm_x1=80.682+89.345+152.468.ply": + #if ply_file == "151140_9cm_x1=30.578+41.705+90.753.ply": + # print("主::", ply_file) + # print("临近::", ply_file_near) + # time.sleep(3) + if x_dis<-10 and y_dis<-10: + need_check_list.append(ply_file_near) + need_values_dict["need_check_list"] = need_check_list + # need_values_dict["max_x_value"] = bounds_max_x + # need_values_dict["max_y_value"] = bounds_max_y + + check_touch_dict[ply_file] = need_values_dict + + + # print(check_touch_dict) + # print("开始要计算触碰检测的数据") + # for ply_file, values in check_touch_dict.items(): + # print("*"*50) + # print(ply_file) + # 
print(values) + + # 去掉离比较远的数据 + for check_touch_key,check_touch_values in check_touch_dict.items(): + print("-"*50) + #print(check_touch_key) + #print(check_touch_values["need_check_list"]) + need_check_list= check_touch_values["need_check_list"] + #print(len(need_check_list)) + if len(need_check_list)>2: + ply_A_path = os.path.join(base_dir, check_touch_key) + compact_ply_path = os.path.join(compact_dir, check_touch_key) + if os.path.exists(compact_ply_path): + ply_read_path = compact_ply_path + else: + ply_read_path = ply_A_path + pcd_A = o3d.io.read_point_cloud(ply_read_path) + points_A = np.asarray(pcd_A.points) + distances = [] + for i, check_touch in enumerate(need_check_list): + point = results[check_touch]['centroid'] + ply_path = os.path.join(base_dir, check_touch) + + # 读取当前点云 + pcd = o3d.io.read_point_cloud(ply_path) + points = np.asarray(pcd.points) + + # 计算点云之间最小点对距离(brute-force) + diff = points_A[:, np.newaxis, :] - points[np.newaxis, :, :] # (N, M, 3) + dists = np.linalg.norm(diff, axis=2) # (N, M) + min_distance = np.min(dists) + + #print(f"check_touch: {check_touch}, centroid: {point}, min_distance: {min_distance:.4f}") + distances.append((i, point, min_distance, check_touch)) + distances.sort(key=lambda x: x[2]) + + # 提取最近的 3 个点 + nearest_points = distances[:5] + last_elements = [item[-1] for item in nearest_points] + # print(f"nearest_points---------{nearest_points}") + # print(f"check_touch_key--------{check_touch_key}") + # print(f"last_elements--------{last_elements}") + check_touch_dict[check_touch_key]["need_check_list"] = last_elements + + return check_touch_dict + + # for check_touch_key,check_touch_values in check_touch_dict.items(): + # print("*"*50) + # print(check_touch_key) + # print(check_touch_values) + + +if __name__ == '__main__': + bounds_fix_out_dir = "/data/datasets_20t/type_setting_test_data/print_bounds_fix_data/" + check_touch_dict=make_near_dict(bounds_fix_out_dir) + print(f"check_touch_dict--------------{check_touch_dict}") + """ + {'need_check_list': ['131508_18cm_x1=51.412+87.921+181.446.ply', '239617_12cm_x1=43.987+54.233+120.691.ply']}, + """ + diff --git a/print.sh b/print.sh new file mode 100755 index 0000000..a0c999f --- /dev/null +++ b/print.sh @@ -0,0 +1,6 @@ +#!/bin/bash +while true; do + python clound_print.py | tee -a output.log + echo "脚本执行完成,等待10秒后重新运行..." 
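+    # clound_print.py processes at most one queued task per run and then exits,
+    # so this loop is what keeps the worker alive; the sleep below spaces out
+    # successive polls of the Redis queue.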
+ sleep 10 +done diff --git a/print_factory_type_setting_obj_run.py b/print_factory_type_setting_obj_run.py new file mode 100644 index 0000000..9e415bc --- /dev/null +++ b/print_factory_type_setting_obj_run.py @@ -0,0 +1,291 @@ +import os +import shutil +import time +import sys +import argparse + +script_dir = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, script_dir) + +from print_show_weight_max_obj import make_bbox_for_print,copy_obj_2x +from print_mplot3d_point_cloud_layout import * +from print_merged_many_obj import move_compact_obj_to_file +from test_load_json import load_show_save +from download_print import upload_result + +def get_base_directory(): + """获取脚本或可执行文件的基础目录""" + if getattr(sys, 'frozen', False): + # 打包后的可执行文件环境 + base_path = os.path.dirname(sys.executable) + else: + # 正常脚本运行环境 + base_path = os.path.dirname(os.path.abspath(__file__)) + return base_path + +from datetime import datetime +import gc + +def print_type_setting_obj(base_original_obj_dir=None,cache_type_setting_dir=None,batch_id=0,show_chart=True,selected_mode="标准",output_format="JSON",selected_machine="大机型"): + + print_start_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + weight_fix_out_obj_dir = f"{cache_type_setting_dir}/temp/print_weight_fix_data_obj" + weight_fix_out_ply_dir = f"{cache_type_setting_dir}/temp/print_weight_fix_data_ply" + bounds_fix_out_dir = f"{cache_type_setting_dir}/temp/print_bounds_fix_data" + bounds_compact_out_dir = f"{cache_type_setting_dir}/temp/print_bounds_compact_data" + compact_obj_out_dir = f"{cache_type_setting_dir}/temp/print_compact_obj" # 最后的结果 + placed_remove_dir = f"{base_original_obj_dir}/place_remove_dir" # 已经放置的放到这个目录 + + # 获取基础目录 + base_path = get_base_directory() + # 获取父目录 + parent_dir = os.path.dirname(base_path) + bad_dir = os.path.join(parent_dir, "bad") + full_dir = os.path.join(parent_dir, "full") + # print(bad_dir) + # print(full_dir) + + if output_format == "模型": + if os.path.exists(weight_fix_out_obj_dir): + shutil.rmtree(weight_fix_out_obj_dir) + if os.path.exists(weight_fix_out_ply_dir): + shutil.rmtree(weight_fix_out_ply_dir) + if os.path.exists(bounds_fix_out_dir): + shutil.rmtree(bounds_fix_out_dir) + if os.path.exists(bounds_compact_out_dir): + shutil.rmtree(bounds_compact_out_dir) + if os.path.exists(compact_obj_out_dir): + shutil.rmtree(compact_obj_out_dir) + # if os.path.exists(output_folder): # 不需要 + # shutil.rmtree(output_folder) + + time.sleep(1) + if not os.path.exists(weight_fix_out_obj_dir): + os.makedirs(weight_fix_out_obj_dir) + if not os.path.exists(weight_fix_out_ply_dir): + os.makedirs(weight_fix_out_ply_dir) + if not os.path.exists(bounds_fix_out_dir): + os.mkdir(bounds_fix_out_dir) + if not os.path.exists(bounds_compact_out_dir): + os.makedirs(bounds_compact_out_dir) + if not os.path.exists(compact_obj_out_dir): + os.makedirs(compact_obj_out_dir) + # if not os.path.exists(output_folder): # 不需要 + # os.makedirs(output_folder) + + print("selected_machine",selected_machine,"selected_mode",selected_mode,"output_format",output_format) + compact_min_dis = True + compact_min_dis2 = False + if selected_mode=="标准" : + compact_min_dis = False + # if output_format=="JSON": + compact_min_dis2 = True + else : + compact_min_dis = True + + move_back = True + + machine_size = [600, 500, 300] + if selected_machine=="小机型": + machine_size[0] = 380 + machine_size[1] = 345 + machine_size[2] = 250 + + start_time = time.time() + copy_obj_2x(base_original_obj_dir) + dict_bad = {} + dict_best_angel = {} + dict_fix = {} + dict_origin = {} + 
dict_origin_real = {} + dict_total_matrix= {} + dict_mesh_obj = make_bbox_for_print(base_original_obj_dir, weight_fix_out_obj_dir, weight_fix_out_ply_dir,show_chart,dict_bad, dict_best_angel,dict_fix,dict_origin,dict_origin_real,compact_min_dis or compact_min_dis2,dict_total_matrix) + mesh_count = len(dict_mesh_obj) + if mesh_count<=0: + print("选择的文件夹没有模型") + return -1 + end_time1 = time.time() + dict_bounds_fix = {} + placed_models= ply_print_layout_platform(weight_fix_out_obj_dir,weight_fix_out_ply_dir,bounds_fix_out_dir,show_chart,dict_mesh_obj,dict_fix,dict_bounds_fix,machine_size,dict_total_matrix) + end_time2 = time.time() + dict_unplaced = {} + dict_compact = {} + + if compact_min_dis: + # if output_format=="JSON" : + if True : + can_compact_json = True + if can_compact_json : + compact_mode_for_min_dis1_json(bounds_fix_out_dir,bounds_compact_out_dir,show_chart,move_back,placed_models,dict_unplaced,dict_bounds_fix,dict_compact,machine_size,dict_total_matrix) + else : + pass_for_min_dis(bounds_fix_out_dir, bounds_compact_out_dir,placed_models, dict_unplaced,dict_bounds_fix,dict_compact) + else : + compact_mode_for_min_dis(bounds_fix_out_dir,bounds_compact_out_dir,show_chart,move_back,placed_models,dict_unplaced,dict_bounds_fix,dict_compact,machine_size) + else: + compact_min_dis2 = False + if compact_min_dis2: + compact_mode_for_min_dis2_json(bounds_fix_out_dir,bounds_compact_out_dir,show_chart,move_back,placed_models,dict_unplaced,dict_bounds_fix,dict_compact,machine_size,dict_total_matrix) + else : + pass_for_min_dis(bounds_fix_out_dir, bounds_compact_out_dir,placed_models, dict_unplaced,dict_bounds_fix,dict_compact) + + end_time3 = time.time() + + #if os.path.exists(placed_remove_dir): + # shutil.rmtree(placed_remove_dir) + if not os.path.exists(placed_remove_dir): + os.makedirs(placed_remove_dir) + + if not os.path.exists(bad_dir): + os.makedirs(bad_dir) + + if not os.path.exists(full_dir): + os.makedirs(full_dir) + + is_small_machine = True if selected_machine=="小机型" else False + use_json = True if output_format=="JSON" else False + save_mesh = True if output_format=="模型" else False + # if use_json: + + version = "print_type_setting25.local" + + layout_data, send_layout_data = move_obj_to_compact_bounds_json(bounds_fix_out_dir,bounds_compact_out_dir,weight_fix_out_obj_dir, + base_original_obj_dir,compact_obj_out_dir,dict_mesh_obj,dict_unplaced, + placed_remove_dir,dict_bad,bad_dir,full_dir,dict_best_angel,dict_bounds_fix, + dict_compact,dict_origin,dict_total_matrix,save_mesh,cache_type_setting_dir, + batch_id, print_start_time,selected_machine,selected_mode,version) + + # else: + # move_obj_to_compact_bounds(bounds_fix_out_dir,bounds_compact_out_dir,weight_fix_out_obj_dir,base_original_obj_dir,compact_obj_out_dir,dict_mesh_obj,dict_unplaced,placed_remove_dir,dict_bad,bad_dir,full_dir,dict_bounds_fix,dict_compact,dict_origin) + + end_time4 = time.time() + #move_compact_obj_to_file(compact_obj_out_dir, output_folder) # 不需要 + print("排版完成") + end_time = time.time() + elapsed_seconds = end_time - start_time + elapsed_seconds1 = end_time1 - start_time # 计算重心 + elapsed_seconds2 = end_time2 - end_time1 # + elapsed_seconds3 = end_time3 - end_time2 + elapsed_seconds4 = end_time4 - end_time3 + elapsed_minutes = int(elapsed_seconds // 60) + elapsed_minutes1 = int(elapsed_seconds1 // 60) + elapsed_minutes2 = int(elapsed_seconds2 // 60) + elapsed_minutes3 = int(elapsed_seconds3 // 60) + elapsed_minutes4 = int(elapsed_seconds4 // 60) + print(f"排版总耗时::{elapsed_minutes} 分 / {elapsed_seconds} 
秒") + print(f"计算重心::{elapsed_minutes1} 分 / {elapsed_seconds1} 秒") + print(f"排包围盒::{elapsed_minutes2} 分 / {elapsed_seconds2} 秒") + + print(f"挪紧凑::{elapsed_minutes3} 分 / {elapsed_seconds3} 秒") + print(f"移动到位置::{elapsed_minutes4} 分 / {elapsed_seconds4} 秒") + + dict_mesh_obj.clear() + del dict_mesh_obj + dict_bad.clear() + del dict_bad + dict_fix.clear() + del dict_fix + dict_bounds_fix.clear() + del dict_bounds_fix + dict_unplaced.clear() + del dict_unplaced + dict_compact.clear() + del dict_compact + gc.collect() + + # print(base_original_obj_dir,blank_dir,batch_id) + + is_screenshot = True + if is_screenshot: + start_time = time.time() + blank_path = get_blank_path(parent_dir, is_small_machine) + load_show_save(base_original_obj_dir, dict_origin, blank_path, batch_id) + elapsed_seconds5 = time.time() - start_time + elapsed_minutes5 = int(elapsed_seconds5 // 60) + print(f"保存截图耗时::{elapsed_minutes5} 分 / {elapsed_seconds5} 秒") + + dict_origin.clear() + del dict_origin + gc.collect() + + is_upload_result = True + if is_upload_result: + print(f"执行上传-parent_dir={parent_dir},base_original_obj_dir={base_original_obj_dir},batch_id={batch_id}") + # oss_config = f"{base_original_obj_dir}/../print_factory_type_setting_big/download_print/run.yaml" + oss_config = f"{parent_dir}/print_factory_type_setting_big/download_print/run.yaml" + + upload_result(base_original_obj_dir, oss_config, batch_id) + + print(f"is_test={is_test}") + if is_test : + is_send_layout_data = False + else : + is_send_layout_data = True + # is_send_layout_data = False + + if is_send_layout_data: + print(f"send_layout_data={send_layout_data}") + url = 'https://mp.api.suwa3d.com/api/printTypeSettingOrder/printTypeSettingOrderSuccess' + # url = 'http://127.0.0.1:8199/api/typeSettingPrintOrder/printTypeSettingOrderSuccess' + try: + response = requests.post(url, json.dumps(send_layout_data), timeout=30) + #写入文件中 log/request.txt + # with open('log/request.txt', 'w+') as f: + # f.write(json.dumps(send_layout_data, ensure_ascii=False, indent=2)) + # 检查响应状态码 + if response.status_code == 200: + try: + result = response.json() + print(f"请求成功,返回结果: {result}") + except ValueError as e: + print(f"响应不是有效的JSON格式: {e}") + print(f"响应内容: {response.text}") + else: + print(f"请求失败,状态码: {response.status_code}") + print(f"响应内容: {response.text}") + except requests.exceptions.Timeout: + print(f"请求超时: 连接 {url} 超过30秒未响应") + except requests.exceptions.ConnectionError as e: + print(f"连接错误: 无法连接到服务器 {url}, 错误信息: {e}") + except requests.exceptions.RequestException as e: + print(f"请求异常: {e}") + except Exception as e: + print(f"未知错误: {e}") + + return 0 + +def get_blank_path(parent_dir=None, is_small_machine=False): + if is_small_machine: + return os.path.join(parent_dir, "blank/blank_bias/blank_small.obj") + else: + return os.path.join(parent_dir, "blank/blank_bias/blank2.obj") + +def preview(base_original_obj_dir=None, batch_id=0, is_small_machine=False): + + base_path = get_base_directory() + parent_dir = os.path.dirname(base_path) + # blank_dir = os.path.join(parent_dir, "blank", "blank_bias") + blank_path = get_blank_path(parent_dir, is_small_machine) + + load_show_save(base_original_obj_dir, {}, blank_path, batch_id, True) + +if __name__ == '__main__': + + # parser = argparse.ArgumentParser() + # parser.add_argument("--batch_id", type=str, required=True, help="batch_id") + # args = parser.parse_args() + + # batch_id = args.batch_id + batch_id = "9" + src_dir = batch_id + selected_mode="紧凑" # 标准 紧凑 + output_format="JSON" # 模型 JSON + selected_machine = "大机型" # 小机型 
大机型 + + print_factory_type_dir="/root/print_factory_type" + # cache_type_setting_dir=f"/data/datasets_20t/type_setting_test_data/{src_dir}" + cache_type_setting_dir=f"{print_factory_type_dir}/{src_dir}/arrange" + base_original_obj_dir = f"{print_factory_type_dir}/{src_dir}" + + print_type_setting_obj(base_original_obj_dir=base_original_obj_dir,cache_type_setting_dir=cache_type_setting_dir, + batch_id=batch_id,show_chart=False,selected_mode=selected_mode,output_format=output_format,selected_machine=selected_machine) diff --git a/print_factory_type_setting_obj_run_GUI.py b/print_factory_type_setting_obj_run_GUI.py new file mode 100644 index 0000000..d480010 --- /dev/null +++ b/print_factory_type_setting_obj_run_GUI.py @@ -0,0 +1,193 @@ +import os +import shutil +import time +import sys + +script_dir = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, script_dir) + +from print_show_weight_max_obj import make_bbox_for_print,copy_obj_2x +from print_mplot3d_point_cloud_layout import * +from print_merged_many_obj import move_compact_obj_to_file +from test_load_json import load_and_show + + +def get_base_directory(): + """获取脚本或可执行文件的基础目录""" + if getattr(sys, 'frozen', False): + # 打包后的可执行文件环境 + base_path = os.path.dirname(sys.executable) + else: + # 正常脚本运行环境 + base_path = os.path.dirname(os.path.abspath(__file__)) + return base_path + +from datetime import datetime +def print_type_setting_obj(base_original_obj_dir=None,cache_type_setting_dir=None,show_chart=True,selected_mode="标准",output_format="JSON",selected_machine="大机型"): + + print_start_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + weight_fix_out_obj_dir = f"{cache_type_setting_dir}/print_weight_fix_data_obj" + weight_fix_out_ply_dir = f"{cache_type_setting_dir}/print_weight_fix_data_ply" + bounds_fix_out_dir = f"{cache_type_setting_dir}/print_bounds_fix_data" + bounds_compact_out_dir = f"{cache_type_setting_dir}/print_bounds_compact_data" + compact_obj_out_dir = f"{cache_type_setting_dir}/print_compact_obj" # 最后的结果 + placed_remove_dir = f"{base_original_obj_dir}/place_remove_dir" # 已经放置的放到这个目录 + + # 获取基础目录 + base_path = get_base_directory() + # 获取父目录 + parent_dir = os.path.dirname(base_path) + bad_dir = os.path.join(parent_dir, "bad") + full_dir = os.path.join(parent_dir, "full") + blank_dir = os.path.join(parent_dir, "blank") + # print(bad_dir) + # print(full_dir) + + # 测试代码 + """ + selected_machine = "小机型" # 小机型 大机型 + selected_mode="紧凑" # 标准 紧凑 + output_format="模型" # 模型 JSON + """ + + if output_format == "模型": + if os.path.exists(weight_fix_out_obj_dir): + shutil.rmtree(weight_fix_out_obj_dir) + if os.path.exists(weight_fix_out_ply_dir): + shutil.rmtree(weight_fix_out_ply_dir) + if os.path.exists(bounds_fix_out_dir): + shutil.rmtree(bounds_fix_out_dir) + if os.path.exists(bounds_compact_out_dir): + shutil.rmtree(bounds_compact_out_dir) + if os.path.exists(compact_obj_out_dir): + shutil.rmtree(compact_obj_out_dir) + # if os.path.exists(output_folder): # 不需要 + # shutil.rmtree(output_folder) + + time.sleep(1) + if not os.path.exists(weight_fix_out_obj_dir): + os.makedirs(weight_fix_out_obj_dir) + if not os.path.exists(weight_fix_out_ply_dir): + os.makedirs(weight_fix_out_ply_dir) + if not os.path.exists(bounds_fix_out_dir): + os.mkdir(bounds_fix_out_dir) + if not os.path.exists(bounds_compact_out_dir): + os.makedirs(bounds_compact_out_dir) + if not os.path.exists(compact_obj_out_dir): + os.makedirs(compact_obj_out_dir) + # if not os.path.exists(output_folder): # 不需要 + # os.makedirs(output_folder) + + 
print("selected_machine",selected_machine,"selected_mode",selected_mode,"output_format",output_format) + compact_min_dis = True + compact_min_dis2 = False + if selected_mode=="标准" : + compact_min_dis = False + if output_format=="JSON": + compact_min_dis2 = True + else : + compact_min_dis = True + + move_back = True + + machine_size = [600, 500, 300] + if selected_machine=="小机型": + machine_size[0] = 380 + machine_size[1] = 345 + machine_size[2] = 250 + + start_time = time.time() + copy_obj_2x(base_original_obj_dir) + dict_bad = {} + dict_best_angel = {} + dict_fix = {} + dict_origin = {} + dict_total_matrix= {} + dict_mesh_obj = make_bbox_for_print(base_original_obj_dir, weight_fix_out_obj_dir, weight_fix_out_ply_dir,show_chart,dict_bad, dict_best_angel,dict_fix,dict_origin,compact_min_dis or compact_min_dis2,dict_total_matrix) + mesh_count = len(dict_mesh_obj) + if mesh_count<=0: + print("选择的文件夹没有模型") + return -1 + end_time1 = time.time() + dict_bounds_fix = {} + placed_models= ply_print_layout_platform(weight_fix_out_obj_dir,weight_fix_out_ply_dir,bounds_fix_out_dir,show_chart,dict_mesh_obj,dict_fix,dict_bounds_fix,machine_size,dict_total_matrix) + end_time2 = time.time() + dict_unplaced = {} + dict_compact = {} + + if compact_min_dis: + if output_format=="JSON" : + can_compact_json = True + if can_compact_json : + compact_mode_for_min_dis1_json(bounds_fix_out_dir,bounds_compact_out_dir,show_chart,move_back,placed_models,dict_unplaced,dict_bounds_fix,dict_compact,machine_size,dict_total_matrix) + else : + pass_for_min_dis(bounds_fix_out_dir, bounds_compact_out_dir,placed_models, dict_unplaced,dict_bounds_fix,dict_compact) + else : + compact_mode_for_min_dis(bounds_fix_out_dir,bounds_compact_out_dir,show_chart,move_back,placed_models,dict_unplaced,dict_bounds_fix,dict_compact,machine_size) + else: + if compact_min_dis2: + compact_mode_for_min_dis2_json(bounds_fix_out_dir,bounds_compact_out_dir,show_chart,move_back,placed_models,dict_unplaced,dict_bounds_fix,dict_compact,machine_size,dict_total_matrix) + else : + pass_for_min_dis(bounds_fix_out_dir, bounds_compact_out_dir,placed_models, dict_unplaced,dict_bounds_fix,dict_compact) + + end_time3 = time.time() + + #if os.path.exists(placed_remove_dir): + # shutil.rmtree(placed_remove_dir) + if not os.path.exists(placed_remove_dir): + os.makedirs(placed_remove_dir) + + if not os.path.exists(bad_dir): + os.makedirs(bad_dir) + + if not os.path.exists(full_dir): + os.makedirs(full_dir) + + save_mesh = True if output_format=="模型" else False + # move_obj_to_compact_bounds_json(bounds_fix_out_dir,bounds_compact_out_dir,weight_fix_out_obj_dir,base_original_obj_dir,compact_obj_out_dir,dict_mesh_obj,dict_unplaced,placed_remove_dir,dict_bad,bad_dir,full_dir,dict_best_angel,dict_bounds_fix,dict_compact,save_mesh,dict_origin,dict_total_matrix,print_start_time) + + version = "print_type_setting25.11.21.1" + batch_id = os.path.basename(base_path.rstrip('/')) + move_obj_to_compact_bounds_json(bounds_fix_out_dir,bounds_compact_out_dir,weight_fix_out_obj_dir, + base_original_obj_dir,compact_obj_out_dir,dict_mesh_obj,dict_unplaced, + placed_remove_dir,dict_bad,bad_dir,full_dir,dict_best_angel,dict_bounds_fix, + dict_compact,dict_origin,dict_total_matrix,save_mesh,cache_type_setting_dir, + batch_id, print_start_time,selected_machine,selected_mode,version) + end_time4 = time.time() + #move_compact_obj_to_file(compact_obj_out_dir, output_folder) # 不需要 + print("排版完成") + end_time = time.time() + elapsed_seconds = end_time - start_time + elapsed_seconds1 = end_time1 - 
start_time # 计算重心 + elapsed_seconds2 = end_time2 - end_time1 # + elapsed_seconds3 = end_time3 - end_time2 + elapsed_seconds4 = end_time4 - end_time3 + elapsed_minutes = int(elapsed_seconds // 60) + elapsed_minutes1 = int(elapsed_seconds1 // 60) + elapsed_minutes2 = int(elapsed_seconds2 // 60) + elapsed_minutes3 = int(elapsed_seconds3 // 60) + elapsed_minutes4 = int(elapsed_seconds4 // 60) + print(f"排版总耗时::{elapsed_minutes} 分 / {elapsed_seconds} 秒") + print(f"计算重心::{elapsed_minutes1} 分 / {elapsed_seconds1} 秒") + print(f"排包围盒::{elapsed_minutes2} 分 / {elapsed_seconds2} 秒") + + print(f"挪紧凑::{elapsed_minutes3} 分 / {elapsed_seconds3} 秒") + print(f"移动到位置::{elapsed_minutes4} 分 / {elapsed_seconds4} 秒") + + # load_and_show(base_original_obj_dir,blank_dir) + + return 0 + +def preview(base_original_obj_dir=None): + + base_path = get_base_directory() + parent_dir = os.path.dirname(base_path) + blank_dir = os.path.join(parent_dir, "blank", "blank_bias") + + load_and_show(base_original_obj_dir,blank_dir) + +if __name__ == '__main__': + src_dir = "12-9" # 1 5.6.5 5.6.4 5.6.1 5.9 temp + cache_type_setting_dir=f"/data/datasets_20t/type_setting_test_data/{src_dir}" + base_original_obj_dir = f"{print_factory_type_dir}/{src_dir}" + print_type_setting_obj(base_original_obj_dir=base_original_obj_dir,cache_type_setting_dir=cache_type_setting_dir,show_chart=False) diff --git a/print_merged_many_obj.py b/print_merged_many_obj.py new file mode 100644 index 0000000..1671fb0 --- /dev/null +++ b/print_merged_many_obj.py @@ -0,0 +1,162 @@ +import os +import shutil +import time + +def merged_obj_for_group(input_folder,output_folder): + """""" + group_size = 5 + obj_pid_list = os.listdir(input_folder) + group_obj_list = [obj_pid_list[i:i + group_size] for i in range(0, len(obj_pid_list), group_size)] + print(group_obj_list) + for group_obj in group_obj_list: + print(group_obj) + group_pid = "_".join(group_obj) + print(group_pid) + + output_group_folder = os.path.join(output_folder,group_pid) + os.makedirs(output_group_folder, exist_ok=True) + + #input_root_folder = "/data/datasets_20t/obj_merger_test_data/" + #output_folder = "/data/datasets_20t/obj_merger_result/" + output_obj = os.path.join(output_group_folder, f"{group_pid}.obj") + output_mtl = os.path.join(output_group_folder, f"{group_pid}.mtl") + + # 初始化 + merged_obj = [] + merged_mtl = [] + texture_files = set() + material_offset = 0 + vertex_offset = 0 + texture_offset = 0 + normal_offset = 0 + current_materials = {} + + # 遍历每个子文件夹 + for folder_name in group_obj: + folder_path = os.path.join(input_folder, folder_name) + if not os.path.isdir(folder_path): + continue + + obj_file = None + mtl_file = None + texture_file = None + + # 寻找 .obj、.mtl 和 .jpg 文件 + for file_name in os.listdir(folder_path): + if file_name.endswith(".obj"): + obj_file = os.path.join(folder_path, file_name) + elif file_name.endswith(".mtl"): + mtl_file = os.path.join(folder_path, file_name) + elif file_name.endswith(".jpg"): + texture_file = os.path.join(folder_path, file_name) + + # 跳过不完整的文件夹 + if not obj_file or not mtl_file or not texture_file: + print(f"跳过不完整的文件夹:{folder_path}") + continue + + # 读取 .obj 文件 + with open(obj_file, "r") as obj_f: + obj_lines = obj_f.readlines() + + + for line in obj_lines: + if line.startswith("mtllib"): + # 替换材质文件名 + + merged_obj.append(f"mtllib {os.path.basename(output_mtl)}\n") + + elif line.startswith("usemtl"): + # 重命名材质名称,避免冲突 + original_material = line.split()[1] + new_material = f"{original_material}_{material_offset}" + 
print(f"original_material---{original_material}") + print(f"new_material---{new_material}") + merged_obj.append(f"usemtl {new_material}\n") + current_materials[original_material] = new_material + elif line.startswith("v "): # 顶点 + vertex = line.split()[1:] + merged_obj.append(f"v {' '.join(vertex)}\n") + elif line.startswith("vt "): # 纹理坐标 + texture = line.split()[1:] + merged_obj.append(f"vt {' '.join(texture)}\n") + elif line.startswith("vn "): # 法线 + normal = line.split()[1:] + merged_obj.append(f"vn {' '.join(normal)}\n") + elif line.startswith("f "): # 面数据 + face = line.split()[1:] + updated_face = [] + for vertex in face: + indices = vertex.split("/") + indices = [ + str(int(indices[0]) + vertex_offset) if indices[0] else "", + str(int(indices[1]) + texture_offset) if len(indices) > 1 and indices[1] else "", + str(int(indices[2]) + normal_offset) if len(indices) > 2 and indices[2] else "", + ] + updated_face.append("/".join(indices)) + merged_obj.append(f"f {' '.join(updated_face)}\n") + + # 更新偏移量 + vertex_offset += sum(1 for line in obj_lines if line.startswith("v ")) + texture_offset += sum(1 for line in obj_lines if line.startswith("vt ")) + normal_offset += sum(1 for line in obj_lines if line.startswith("vn ")) + + # 读取 .mtl 文件 + with open(mtl_file, "r") as mtl_f: + mtl_lines = mtl_f.readlines() + + for line in mtl_lines: + if line.startswith("newmtl"): + # 重命名材质 + original_material = line.split()[1] + new_material = current_materials.get(original_material, original_material) + merged_mtl.append(f"newmtl {new_material}\n") + elif line.startswith(("map_Kd", "map_Ka", "map_bump")): + # 替换贴图路径为相对路径 + texture_name = os.path.basename(texture_file) + merged_mtl.append(f"{line.split()[0]} {texture_name}\n") + texture_files.add(texture_file) + else: + merged_mtl.append(line) + + material_offset += 1 + + # 写入合并后的 .obj 和 .mtl 文件 + with open(output_obj, "w") as obj_out: + obj_out.writelines(merged_obj) + + with open(output_mtl, "w") as mtl_out: + mtl_out.writelines(merged_mtl) + print(f"texture_files====={texture_files}") + # 将纹理文件复制到输出目录 + for texture_file in texture_files: + shutil.copy(texture_file, output_group_folder) + + print(f"合并完成:{output_obj} 和 {output_mtl}") + print(f"纹理文件已复制到:{output_group_folder}") + +def move_compact_obj_to_file(input_folder,output_folder): + """""" + group_size = 50 + obj_pid_list = os.listdir(input_folder) + group_obj_list = [obj_pid_list[i:i + group_size] for i in range(0, len(obj_pid_list), group_size)] + print(group_obj_list) + for group_obj in group_obj_list: + print(f"group_obj{group_obj}") + out_obj_file_name = group_obj[0]+"_"+group_obj[-1] + print(f"out_obj_file_name:::{out_obj_file_name}") + group_out_put_dir = os.path.join(output_folder,out_obj_file_name) + os.makedirs(group_out_put_dir,exist_ok=True) + for obj_name in group_obj: + original_obj_dir = os.path.join(input_folder,obj_name) + for file_name in os.listdir(original_obj_dir): + original_path = os.path.join(original_obj_dir,file_name) + dis_path = os.path.join(group_out_put_dir,file_name) + shutil.copy(original_path,dis_path) + print("分组完成。") + +if __name__ == '__main__': + input_folder = "/data/datasets_20t/type_setting_test_data/print_compact_obj/" + output_folder = "/data/datasets_20t/type_setting_test_data/obj_merger_result/" + #merged_obj_for_group(input_folder,output_folder) + move_compact_obj_to_file(input_folder, output_folder) diff --git a/print_mplot3d_point_cloud_layout.py b/print_mplot3d_point_cloud_layout.py new file mode 100644 index 0000000..e4bc2f4 --- /dev/null +++ 
b/print_mplot3d_point_cloud_layout.py @@ -0,0 +1,3619 @@ +import os +import json +import requests +import shutil +import time +import random +import matplotlib.pyplot as plt +import open3d as o3d +import numpy as np +from grid_near_three import make_near_dict +#import bpy +from plyfile import PlyData, PlyElement +from test_load_json import custom_mesh_transform +from download_print import is_test + +class Platform: + def __init__(self, width, depth, height): + self.width = width + self.depth = depth + self.height = height + self.placed_models = [] # 已放置的模型 + self.unplaced_models = [] # 未能放置的模型 + self.first_line = True + + #""" + def can_place(self, x, y, z, model): + """检查模型是否可以放置在指定位置""" + mx, my, mz = model['dimensions'] + # 检查是否超出平台边界 + # print("can_place1",x + mx,self.width,y + my,self.depth,z + mz,self.height) + if x + mx > self.width or y + my > self.depth or z + mz > self.height: + return False + # 检查是否与已有模型重叠 + for placed in self.placed_models: + px, py, pz = placed['position'] + pdx, pdy, pdz = placed['dimensions'] + # print("can_place2",px, py, pz, pdx, pdy, pdz) + if not ( + x + mx <= px or px + pdx <= x or + y + my <= py or py + pdy <= y or + z + mz <= pz or pz + pdz <= z + ): + return False + return True + + # 原始使用的 + def place_model(self, model): + """尝试将模型放在底面(Z=0)的一层中""" + mx, my, mz = model['dimensions'] + # 如果模型太高,直接跳过 + if mz > self.height: + print("unplaced_models1", model) + self.unplaced_models.append(model) + return False + z = 0 # 固定只放置在底层 + for y in range(0, self.depth - my + 1): + for x in range(0, self.width - mx + 1): + if self.can_place(x, y, z, model): + model['position'] = (x, y, z) + self.placed_models.append(model) + return True + print("unplaced_models2", model) + self.unplaced_models.append(model) + return False + #""" + + """ + import itertools + + def can_place2(self, position, dimensions, placed_items, container_size): + # 三维AABB碰撞检测优化版[6,8](@ref) + # 边界约束检查 + if any(position[i] + dimensions[i] > container_size[i] for i in range(3)): + return False + + # 空间网格加速检测(参考网页3) + grid_size = 50 # 网格划分粒度 + x_min = position[0] // grid_size + x_max = (position[0] + dimensions[0]) // grid_size + y_min = position[1] // grid_size + y_max = (position[1] + dimensions[1]) // grid_size + + # 获取相关网格内的模型[3](@ref) + related_items = [] + for gx in range(int(x_min), int(x_max)+1): + for gy in range(int(y_min), int(y_max)+1): + related_items.extend([item for item in placed_items + if item['grid_x'] == gx and item['grid_y'] == gy]) + + # 精确AABB检测[6](@ref) + for item in related_items: + item_pos = item['position'] + item_dim = item['dimensions'] + overlap_x = (position[0] < item_pos[0] + item_dim[0]) and (position[0] + dimensions[0] > item_pos[0]) + overlap_y = (position[1] < item_pos[1] + item_dim[1]) and (position[1] + dimensions[1] > item_pos[1]) + overlap_z = (position[2] < item_pos[2] + item_dim[2]) and (position[2] + dimensions[2] > item_pos[2]) + if overlap_x and overlap_y and overlap_z: + return False + return True + + def place_model2(self, models, container_size): + # 优化后的装箱主函数[2,5](@ref) + placed = [] + unplaced = [] + grid_size = 50 # 与can_place中保持一致 + + print(type(models[0]['dimensions'])) # 预期输出: + + # 按体积降序排序[5](@ref) + sorted_models = sorted(models, + key=lambda m: m['dimensions'][0]*m['dimensions'][1]*m['dimensions'][2], + reverse=True) + + # 支持6种旋转方向[2](@ref) + rotations = [ + (0,1,2), (0,2,1), (1,0,2), + (1,2,0), (2,0,1), (2,1,0) + ] + + for model in sorted_models: + placed_flag = False + original_dim = model['dimensions'] + + # 尝试所有旋转方向 + for rot in 
rotations: + rotated_dim = [original_dim[rot[0]], original_dim[rot[1]], original_dim[rot[2]]] + if rotated_dim[2] > container_size[2]: + continue # 跳过高度超标 + + # 优化搜索顺序:从右向左,从下向上[3](@ref) + for y in range(container_size[1] - rotated_dim[1], -1, -1): + for x in range(container_size[0] - rotated_dim[0], -1, -1): + if self.can_place((x,y,0), rotated_dim, placed, container_size): + # 记录网格位置 + model['grid_x'] = x // grid_size + model['grid_y'] = y // grid_size + model['position'] = (x, y, 0) + model['dimensions'] = rotated_dim + placed.append(model) + placed_flag = True + break + if placed_flag: break + if placed_flag: break + + if not placed_flag: + unplaced.append(model) + + return placed, unplaced + #""" + #""" + def can_place3(self, x, y, z, model, is_print=False): + mx, my, mz = model['dimensions'] + + """ + # 边界检查(增加扩展间距) + if (x + mx > self.width or + y + my > self.depth or + z + mz > self.height or + y<=0): + print("can_place3",False) + return False + """ + #print("placed1",x,mx,y,my) + # 边界检查(增加扩展间距) + extend_dist = 4 + if (x - mx < 0 or + y - my < 0 or + z + mz > self.height or + y>=self.depth - extend_dist): + # print("can_place3 1",False, x, mx, y, my, z, mz, self.height) + return False + + # 碰撞检测(正确逻辑与间距处理) + extend_dist_x = 4 # 与place_model3中的扩展距离一致 + extend_dist_y = 2 # 与place_model3中的扩展距离一致 + for placed in self.placed_models: + px, py, pz = placed['position'] + pdx, pdy, pdz = placed['dimensions'] + + """ + # 使用AABB碰撞检测算法[4](@ref) + if (x < px + pdx + extend_dist_x and + x + mx + extend_dist_x > px and + y < py + pdy + extend_dist_y and + y + my + extend_dist_y > py and + z < pz + pdz and + z + mz > pz): + print("can_place3",False) + return False + #""" + + # if is_print: + # print("can_place3",y,py,my,pdy,extend_dist_y) + + #""" + # print("placed2",x,px,pdx,extend_dist_x,mx,self.width) + # 使用AABB碰撞检测算法[4](@ref) + if (x > px - pdx - extend_dist_x and + x - mx - extend_dist_x < px and + y > py - pdy - extend_dist_y and + y - my - extend_dist_y < py and + z < pz + pdz and + z + mz > pz): + # print("can_place3 2",False,model,x,y,z,px,pdx,extend_dist_x,py,pdy,extend_dist_y,my,pz,pdz,pz) + return False + #""" + + # print("can_place3",True) + return True + + def place_model3(self, model, pre_model): + mx, my, mz = model['dimensions'] + if mz > self.height: + self.unplaced_models.append(model) + return False + + z = 0 + extend_dist = 4 + + if pre_model is None: + if self.first_line: + model['position'] = (mx+extend_dist, self.depth - extend_dist, 0) + print(f"First Model {model['name']}") + model['first_line'] = True + else: + model['position'] = (self.width - extend_dist, self.depth - extend_dist, 0) + model['first_line'] = False + + # print("model position1", model['name'], model['position']) + self.placed_models.append(model) + return True + + pre_px, pre_py, pre_pz = pre_model['position'] + pre_mx, pre_my, pre_mz = pre_model['dimensions'] + if self.first_line: + px = pre_px + mx + extend_dist + model['first_line'] = True + else: + px = pre_px - pre_mx - extend_dist + model['first_line'] = False + print(model['name'], "px", px, pre_px, pre_mx) + + """ + if px + mx > self.width: + px = 0 + start_y = self.depth - my - extend_dist + for y in range(start_y, -1, -1): + if self.can_place3(px, y, z, model)==False: + y -= 1 + model['position'] = (px, y, z) + self.placed_models.append(model) + return True + else: + start_y = self.depth - my - extend_dist + for y in range(start_y, -1, -1): + if self.can_place3(px, y, z, model)==False: + y -= 1 + model['position'] = (px, y, z) + 
self.placed_models.append(model) + return True + """ + reach_limit_x = False + if self.first_line: + if px > self.width: + reach_limit_x = True + else: + if px - mx < 0: + reach_limit_x = True + + if reach_limit_x: + self.first_line = False + px = self.width - extend_dist + # final_y = self.depth - my - extend_dist + final_y = self.depth + print("reach_limit_x final_y1", my, final_y, my, extend_dist, px) + for y in range(my, final_y, +1): + # print("y",y) + if self.can_place3(px, y, z, model, True)==False: + y -= 1 + model['position'] = (px, y, z) + # print("model position2", model['name'], model['position']) + self.placed_models.append(model) + return True + else: + start_y = my + extend_dist + # final_y = self.depth - my - extend_dist + final_y = self.depth + # print("final_y2", start_y, final_y, my, extend_dist, px) + for y in range(start_y, final_y, +1): + if self.can_place3(px, y, z, model)==False: + y -= 1 + model['position'] = (px, y, z) + # print("model position2", model['name'], model['position']) + self.placed_models.append(model) + return True + + # print("model position3", model['name'], model['position']) + self.unplaced_models.append(model) + return False + #""" + def arrange_models(self, models): + # 小打印机380*345, 大打印机600*500*300 + delta = 10 + """ + x_max = -380 + delta + y_max = -345 + delta + """ + # x_length = 500 - delta + # y_length = 300 - delta + """对所有模型进行排布(单层)""" + print("⚠️ 单层放置模式:所有模型只能放在平台底面(Z=0)") + # 按高度和面积排序,优先放大模型 + models = sorted(models, key=lambda m: (-m['dimensions'][2], -m['dimensions'][0] * m['dimensions'][1])) + pre_model = None + for model in models: + # self.place_model(model) + # self.place_model2(model, container) + if self.place_model3(model, pre_model): + pre_model = model + print("arrange_models", model['name']) + + def print_results(self): + """打印排布结果""" + print("Placed Models:") + for model in self.placed_models: + print(f" - {model['name']} at {model['position']} with dimensions {model['dimensions']}") + print("Unplaced Models:") + for model in self.unplaced_models: + print(f" - {model['name']} with dimensions {model['dimensions']}") + + def get_result(self): + return self.placed_models, self.unplaced_models + + def visualize(self): + """可视化排布结果""" + fig = plt.figure() + ax = fig.add_subplot(111, projection='3d') + ax.set_title("3D Printing Layout (Single Layer)") + ax.set_xlabel("X (Width)") + ax.set_ylabel("Y (Depth)") + ax.set_zlabel("Z (Height)") + + # 绘制平台边界 + ax.bar3d(0, 0, 0, self.width, self.depth, self.height, color='lightgray', alpha=0.1, edgecolor='black') + + # 绘制已放置的模型 + colors = ['b', 'g', 'r', 'c', 'm', 'y'] + for i, model in enumerate(self.placed_models): + x, y, z = model['position'] + dx, dy, dz = model['dimensions'] + color = colors[i % len(colors)] + ax.bar3d(x, y, z, dx, dy, dz, color=color, alpha=0.3, edgecolor='k') + ax.text(x + dx / 2, y + dy / 2, z + dz / 2, model['name'], color='black', fontsize=8, ha='center') + + ax.set_xlim(0, self.width) + ax.set_ylim(0, self.depth) + ax.set_zlim(0, self.height) + plt.show() + +def get_models_box_size(weight_fix_out_dir,show_chart,dict_fix,machine_size): + """获取排版的盒子大小""" + models = [] + # for ply_file in os.listdir(weight_fix_out_dir): + for ply_file in dict_fix: + # print("get_models_box_size", ply_file) + #print(ply_file.split("_")) + bbox_with_text = ply_file.split("=") + bbox_with = bbox_with_text[-1] + #print("盒子大小",bbox_with) + #time.sleep(1000) + split_text = bbox_with.replace(".ply","").split("+") + ply_pid = bbox_with_text[0] + extend_dist = 2 + x_length = 
int(float(split_text[2])*100) + extend_dist + y_length = int(float(split_text[0])*100) + extend_dist + z_length = int(float(split_text[1])*100) + extend_dist + #print("get_models_box_size",x_length,y_length,z_length) + models.append({'name':ply_file,'dimensions':(int(x_length/100),int(z_length/100),int(y_length/100))}) + #print(models) + # platform = Platform(int(38500 / 100), int(34000 / 100), int(25000 / 100)) + # platform = Platform(int(60000 / 100), int(50000 / 100), int(30000 / 100)) + platform = Platform(int(machine_size[0]), int(machine_size[1]), int(machine_size[2])) + print("开始计算排序...") + platform.arrange_models(models) + platform.print_results() + if show_chart: + platform.visualize() + return platform.get_result() + +def make_merged_pcd(): + # 创建 XY, XZ 和 YZ 平面的点云 + width = 1000 # 平面的宽度 + height = 1000 # 平面的高度 + resolution = 10 # 分辨率,控制点的密集程度 + + # XY 平面 (z=0) + x = np.linspace(-width / 2, width / 2, int(width / resolution)) # X 轴范围 + y = np.linspace(-height / 2, height / 2, int(height / resolution)) # Y 轴范围 + xv, yv = np.meshgrid(x, y) # 创建网格 + zv = np.zeros_like(xv) # z 坐标恒为 0 + points_xy = np.vstack((xv.flatten(), yv.flatten(), zv.flatten())).T + + # XZ 平面 (y=0) + z_xz = np.linspace(-height / 2, height / 2, int(height / resolution)) # Z 轴范围 + x_xz = np.linspace(-width / 2, width / 2, int(width / resolution)) # X 轴范围 + xv_xz, zv_xz = np.meshgrid(x_xz, z_xz) # 创建网格 + y_xz = np.zeros_like(xv_xz) # y 坐标恒为 0 + points_xz = np.vstack((xv_xz.flatten(), y_xz.flatten(), zv_xz.flatten())).T + + # YZ 平面 (x=0) + y_yz = np.linspace(-height / 2, height / 2, int(height / resolution)) # Y 轴范围 + z_yz = np.linspace(-width / 2, width / 2, int(width / resolution)) # Z 轴范围 + yv_yz, zv_yz = np.meshgrid(y_yz, z_yz) # 创建网格 + x_yz = np.zeros_like(yv_yz) # x 坐标恒为 0 + points_yz = np.vstack((x_yz.flatten(), yv_yz.flatten(), zv_yz.flatten())).T + + # 合并三个平面的点 + all_points = np.vstack((points_xy, points_xz, points_yz)) + + # 创建Open3D点云对象 + pcd_merged = o3d.geometry.PointCloud() + pcd_merged.points = o3d.utility.Vector3dVector(all_points) + pcd_merged.paint_uniform_color([1, 0, 0]) # 设置点云颜色为红色 + + return pcd_merged + +def read_mesh(obj_path, simple=True): + mesh_obj = o3d.io.read_triangle_mesh(obj_path) + return mesh_obj + if not simple: + return mesh_obj + original_triangles = len(mesh_obj.triangles) + target_triangles = original_triangles if original_triangles <= 10000 else 10000 + if original_triangles > 10000: + mesh_obj = mesh_obj.simplify_quadric_decimation( + target_number_of_triangles=target_triangles, + maximum_error=0.0001, + boundary_weight=1.0 + ) + + return mesh_obj + +def ply_print_layout_platform(weight_fix_out_obj_dir,weight_fix_out_dir,bounds_fix_out_dir,show_chart,dict_mesh_obj,dict_fix,dict_bounds_fix,machine_size,dict_total_matrix): + """根据排版结果移动点云到指定位置""" + placed_models,unplaced_models = get_models_box_size(weight_fix_out_dir,show_chart,dict_fix,machine_size) + if len(placed_models) ==0: + print("放进打印盒的数量为0") + return + # 创建坐标系 + draw_list = [] + if show_chart: + coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.2, origin=[0, 0, 0]) + pcd_plane = make_merged_pcd() + draw_list.append(coordinate_frame) + draw_list.append(pcd_plane) + print("ply_print_layout_platform placed_models") + for model in placed_models: + print(f" - {model['name']} at {model['position']} with dimensions {model['dimensions']}") + ply_file_name = model['name'] + move_position = model['position'] + ply_origin_path = os.path.join(weight_fix_out_dir,ply_file_name) + # 
print("要读取的点云数据路径",ply_origin_path) + # pcd = o3d.io.read_point_cloud(ply_origin_path) + pcd = dict_fix[ply_file_name] + # print("dict_fix read",ply_file_name,move_position) + + points = np.asarray(pcd.points) + min_bound = np.min(points, axis=0) # 获取点云的最小边界 + max_bound = np.max(points, axis=0) + min_bound[1] = max(min_bound[1], 0) + bbox_center = (min_bound + max_bound) / 2 # 计算包围盒的中心点 + bbox_extent = (max_bound - min_bound) + new_bbox = o3d.geometry.OrientedBoundingBox(center=bbox_center, + R=np.eye(3), # 旋转矩阵,默认没有旋转 + extent=bbox_extent) + x = move_position[0] + y = move_position[1] + z = move_position[2] + #move_position = np.array([x,y,z])/100 + move_position = np.array([x, y, z]) + # translation_vector = -move_position + translation_vector = move_position + pcd.translate(translation_vector) + new_bbox.translate(translation_vector) + obj_name = ply_file_name.split("=")[0]+".obj" + obj_path = os.path.join(weight_fix_out_obj_dir,obj_name) + + mesh_obj = dict_mesh_obj[obj_name] + # print("dict_mesh_obj",obj_name) + + mesh_obj.translate(translation_vector) + # o3d.io.write_triangle_mesh(obj_path, mesh_obj) + + T_trans1 = np.eye(4) + T_trans1[:3, 3] = translation_vector + dict_total_matrix[obj_name]= T_trans1 @ dict_total_matrix[obj_name] + + new_bbox_lines = o3d.geometry.LineSet.create_from_oriented_bounding_box(new_bbox) + new_bbox_lines.paint_uniform_color([1, 0, 0]) # 红色 + + ply_out_path = os.path.join(bounds_fix_out_dir, ply_file_name) + + """ + #试着旋转180,让脸朝上 + centroid = pcd.get_center() + z_mean1 = centroid[2] + + angle_deg = 180 + angle_rad = np.radians(angle_deg) # 转换为弧度 + + # 生成旋转矩阵(绕Z轴) + rotation_matrix = pcd.get_rotation_matrix_from_xyz((0, 0, angle_rad)) # 参数为(X,Y,Z轴的旋转弧度) + + # 执行旋转(绕点云中心旋转,避免位移) + center = pcd.get_center() # 获取点云质心坐标 + pcd.rotate(rotation_matrix, center=center) + + centroid = pcd.get_center() + z_mean2 = centroid[2] + + if z_mean2 > z_mean1: + rotation_matrix = pcd.get_rotation_matrix_from_xyz((-angle_rad, 0, 0)) # 参数为(X,Y,Z轴的旋转弧度) + center = pcd.get_center() # 获取点云质心坐标 + # pcd.rotate(rotation_matrix, center=center) + #""" + + # o3d.io.write_point_cloud(ply_out_path, pcd) + dict_bounds_fix[ply_file_name] = pcd + #o3d.visualization.draw_geometries([pcd,pcd_plane,coordinate_frame]) + if show_chart: + draw_list.append(pcd) + draw_list.append(new_bbox_lines) + if show_chart: + o3d.visualization.draw_geometries(draw_list) + + return placed_models + +def compute_distance2(pcd1, pcd2): + points1 = np.asarray(pcd1.points) + points2 = np.asarray(pcd2.points) + min_distance = float('inf') + + for p1 in points1: + distances = np.linalg.norm(points2 - p1, axis=1) + min_distance = min(min_distance, np.min(distances)) + + return min_distance + +def compute_distance(pcd1, pcd2): + """ + 正确计算两个点云之间距离的函数。 + 返回两个点云之间最近距离的平均值、最小值以及全部距离数组。 + """ + # 使用Open3D内置的高效方法计算距离 + # 计算pcd1中每个点到pcd2中最近点的距离 + distances = pcd1.compute_point_cloud_distance(pcd2) + distances = np.asarray(distances) + + # 计算有意义的统计量 + min_dist = np.min(distances) # 所有点中的最小距离 + mean_dist = np.mean(distances) # 距离的平均值 + + # return min_dist, mean_dist, distances + return min_dist + +def compute_distance_x(pcd1, pcd2): + points1 = np.asarray(pcd1.points)[:, 0] # 提取所有X坐标[3](@ref) + points2 = np.asarray(pcd2.points)[:, 0] + + x_diff = np.abs(points1[:, np.newaxis] - points2) + return np.min(x_diff) + +def compute_distance_y(pcd1, pcd2): + points1 = np.asarray(pcd1.points)[:, 1] # 提取所有Y坐标[3](@ref) + points2 = np.asarray(pcd2.points)[:, 1] + + y_diff = np.abs(points1.reshape(-1, 1) - points2) + return np.min(y_diff) + 
+def check_collision(pcd_moving, static_pcds,collision_threshold): + #print("检测碰撞中》》") + #print(f"collision_threshold{collision_threshold}") + + moving_points = np.asarray(pcd_moving.points) + min_distance_to_x_axis = np.min(np.abs(moving_points[:, 1])) # Y 坐标即为与 X 轴的距离 + #print(f"与 X 轴的最小距离: {min_distance_to_x_axis}") + min_distance_to_y_axis = np.min(np.abs(moving_points[:, 0])) # X 坐标即为与 Y 轴的距离 + #print(f"与 Y 轴的最小距离: {min_distance_to_y_axis}") + min_distance_to_z_axis = np.min(np.abs(moving_points[:, 2])) # X 坐标即为与 Y 轴的距离 + #print(f"与 Z 轴的最小距离: {min_distance_to_z_axis}") + #print(f"pcd_moving{len(static_pcds)}") + #if min_distance_to_x_axis < collision_threshold: + #print(f"与 X 轴发生碰撞! 最小距离: {min_distance_to_x_axis}") + # return True + #if min_distance_to_y_axis < collision_threshold: + #print(f"与 Y 轴发生碰撞! 最小距离: {min_distance_to_y_axis}") + # return True + if len(static_pcds)>0: + for static_pcd in static_pcds: + if static_pcd==pcd_moving: + continue + min_distance = compute_distance(pcd_moving, static_pcd) + #print(f"与点云的最小距离: {min_distance}") + if min_distance < collision_threshold: + #print(f"发生碰撞! 最小距离: {min_distance}") + return True + return False + +def check_collision_x(pcd_moving, static_pcds,collision_threshold): + moving_points = np.asarray(pcd_moving.points) + min_distance_to_x_axis = np.min(np.abs(moving_points[:, 1])) # Y 坐标即为与 X 轴的距离 + #print(f"与 X 轴的最小距离: {min_distance_to_x_axis}") + #print(f"与 Y 轴的最小距离: {min_distance_to_y_axis}") + #print(f"pcd_moving{len(static_pcds)}") + if min_distance_to_x_axis < collision_threshold: + print(f"与 X 轴发生碰撞! 最小距离: {min_distance_to_x_axis}") + return True + + return check_collision_all(pcd_moving, static_pcds,collision_threshold) + +def check_collision_y(pcd_moving, static_pcds,collision_threshold): + moving_points = np.asarray(pcd_moving.points) + #print(f"与 X 轴的最小距离: {min_distance_to_x_axis}") + min_distance_to_y_axis = np.min(np.abs(moving_points[:, 0])) # X 坐标即为与 Y 轴的距离 + #print(f"与 Y 轴的最小距离: {min_distance_to_y_axis}") + #print(f"pcd_moving{len(static_pcds)}") + if min_distance_to_y_axis < collision_threshold: + print(f"与 Y 轴发生碰撞! 最小距离: {min_distance_to_y_axis}") + return True + + return check_collision_all(pcd_moving, static_pcds,collision_threshold) + +def check_collision_all2(pcd_moving, static_pcds,collision_threshold): + if len(static_pcds)>0: + for static_pcd in static_pcds: + if static_pcd==pcd_moving: + continue + min_distance = compute_distance(pcd_moving, static_pcd) + #print(f"与点云的最小距离: {min_distance}") + if min_distance < collision_threshold: + #print(f"发生碰撞! 
最小距离: {min_distance}") + return True + return False + +""" +import numpy as np +import numba +from numba import cuda +import math + +# 预加载静态点云到GPU显存 +static_gpu_arrays = [] + +def preload_static_pcds(static_pcds): + global static_gpu_arrays + static_gpu_arrays = [ + cuda.to_device(np.asarray(pcd.points)) + for pcd in static_pcds + ] + +@cuda.jit(device=True) +def point_distance(p1, p2): + dx = p1[0] - p2[0] + dy = p1[1] - p2[1] + dz = p1[2] - p2[2] + return math.sqrt(dx*dx + dy*dy + dz*dz) + +@cuda.jit +def collision_check_kernel(moving_points, static_points, threshold, collision_flag): + # 三维线程索引划分 + x, y, z = cuda.grid(3) + + # 获取当前处理的静态点云索引 + static_idx = z + + if static_idx >= len(static_points): + return + + # 共享内存缓存移动点云数据 + shared_moving = cuda.shared.array(shape=(32,3), dtype=numba.float32) + tx = cuda.threadIdx.x + if tx < moving_points.shape[0]: + shared_moving[tx, 0] = moving_points[tx, 0] + shared_moving[tx, 1] = moving_points[tx, 1] + shared_moving[tx, 2] = moving_points[tx, 2] + cuda.syncthreads() + + # 遍历当前静态点云的所有点 + static_pt = static_points[static_idx][y] + min_dist = math.inf + + # 并行计算移动点云各点与当前静态点的距离 + for i in range(moving_points.shape[0]): + dist = point_distance(shared_moving[i], static_pt) + if dist < min_dist: + min_dist = dist + + # 原子操作更新碰撞状态 + if min_dist < threshold: + cuda.atomic.min(collision_flag, 0, 1) + +def check_collision_all(pcd_moving, static_pcds, collision_threshold): + # 移动点云数据上传GPU + moving_points = np.asarray(pcd_moving.points) + d_moving = cuda.to_device(moving_points.astype(np.float32)) + + # 初始化碰撞标志 + d_collision = cuda.to_device(np.zeros(1, dtype=np.int32)) + + # 三维网格划分(静态点云数×单点云最大点数×移动点云数) + static_count = len(static_gpu_arrays) + max_static_points = max([arr.shape[0] for arr in static_gpu_arrays]) + + # 计算最优线程块配置 + threads_per_block = (8, 8, 1) + blocks_x = (moving_points.shape[0] + 7) // 8 + blocks_y = (max_static_points + 7) // 8 + blocks_z = static_count + + # 启动核函数 + collision_check_kernel[(blocks_x, blocks_y, blocks_z), threads_per_block]( + d_moving, + [arr for arr in static_gpu_arrays], # 静态点云列表 + np.float32(collision_threshold), + d_collision + ) + + # 获取结果 + collision_result = d_collision.copy_to_host() + return collision_result[0] == 1 +#""" +#""" +import numpy as np + +def compute_aabb(pcd): + """计算点云的AABB包围盒""" + points = np.asarray(pcd.points) + return { + 'min': np.min(points, axis=0), + 'max': np.max(points, axis=0) + } + +def aabb_intersect(a, b, collision_threshold): + """判断两个AABB包围盒是否相交[2,8](@ref)""" + return (a['max'][0] > b['min'][0] - collision_threshold and a['min'][0] < b['max'][0] + collision_threshold) and \ + (a['max'][1] > b['min'][1] - collision_threshold and a['min'][1] < b['max'][1] + collision_threshold) and \ + (a['max'][2] > b['min'][2] - collision_threshold and a['min'][2] < b['max'][2] + collision_threshold) + +def check_collision_all(pcd_moving, static_pcds, collision_threshold): + # 预计算移动点云AABB + moving_aabb = compute_aabb(pcd_moving) + + for static_pcd in static_pcds: + if static_pcd == pcd_moving: + continue + # 第一阶段:AABB快速排除[1,6](@ref) + static_aabb = compute_aabb(static_pcd) + # print("len(static_pcd.points)=",len(static_pcd.points),"len(moving_aabb.points)=",len(pcd_moving.points)) + if not aabb_intersect(moving_aabb, static_aabb, collision_threshold): + continue # 包围盒无交集,直接跳过 + + if not aabb_intersect(moving_aabb, static_aabb, collision_threshold): + return False + + # 第二阶段:精确点距离计算 + min_distance = compute_distance(pcd_moving, static_pcd) + # print("check_collision_all",min_distance) + if min_distance 
< collision_threshold: + return True + + return False + +#""" + +def compute_centroid(pcd): + # 获取点云的所有点 + points = np.asarray(pcd.points) + # 计算质心(只考虑 X 和 Y 坐标) + centroid = np.mean(points[:, :2], axis=0) # 只考虑前两个维度(X 和 Y) + return centroid + +def compute_distance_to_origin(centroid): + # 计算质心距离原点的距离(只考虑 X 和 Y 坐标) + return np.linalg.norm(centroid) # 计算 X 和 Y 的欧几里得距离 + +def compute_closest_distance_to_origin(pcd): + # 获取点云的所有点坐标 + points = np.asarray(pcd.points) + # 计算每个点到原点的距离 + distances = np.linalg.norm(points, axis=1) + # 返回最小距离 + return np.min(distances) + +def sort_ply_files_by_closest_distance(folder_path): + ply_files = [f for f in os.listdir(folder_path) if f.endswith('.ply')] + distances = [] + + for ply_file in ply_files: + # 读取点云数据 + pcd = o3d.io.read_point_cloud(os.path.join(folder_path, ply_file)) + # 计算离原点最近的点的距离 + closest_distance = compute_closest_distance_to_origin(pcd) + distances.append((ply_file, closest_distance)) + + # 按照最近点的距离排序(由近到远) + distances.sort(key=lambda x: x[1]) + + # 返回排序后的文件列表 + sorted_files = [item[0] for item in distances] + print("Sorted files:", sorted_files) + return sorted_files + +def ply_file_at_edge(folder_path): + ply_files = [f for f in os.listdir(folder_path) if f.endswith('.ply')] + edge_points = [] + + for ply_file in ply_files: + # 读取点云数据 + pcd = o3d.io.read_point_cloud(os.path.join(folder_path, ply_file)) + points = np.asarray(pcd.points) # Nx3 的点阵 + x = points[:, 0] + y = points[:, 1] + z = points[:, 2] + + # 计算每个点到 X 轴(Y=Z=0)的距离:√(y² + z²) + dist_to_x_axis = np.sqrt(y ** 2 + z ** 2) + + # 计算每个点到 Y 轴(X=Z=0)的距离:√(x² + z²) + dist_to_y_axis = np.sqrt(x ** 2 + z ** 2) + + # 示例:最小距离和对应的点 + min_x_dist_idx = np.argmin(dist_to_x_axis) + min_y_dist_idx = np.argmin(dist_to_y_axis) + print(f"点云中到 X 轴最近的距离: {dist_to_x_axis[min_x_dist_idx]:.4f}") + print(f"点云中到 Y 轴最近的距离: {dist_to_y_axis[min_y_dist_idx]:.4f}") + min_x_dis = dist_to_x_axis[min_x_dist_idx] + min_y_dix = dist_to_y_axis[min_y_dist_idx] + if min_x_dis<20: + edge_points.append(ply_file) + if min_y_dix<20: + edge_points.append(ply_file) + edge_points=list(set(edge_points)) + print(f"edge_points{edge_points}") + print(len(edge_points)) + + + return edge_points + +def compute_range(pcd): + points = np.asarray(pcd.points) # 获取点云中的点 + x_min, y_min = np.min(points[:, 0]), np.min(points[:, 1]) # X轴和Y轴的最小值 + x_max, y_max = np.max(points[:, 0]), np.max(points[:, 1]) # X轴和Y轴的最大值 + return (x_min, x_max), (y_min, y_max) + +def check_check_finish_which_touch(bounds_compact_out_dir,finish_pcd,finish_pid,exist_finish_pid_list,collision_threshold): + """""" + if len(exist_finish_pid_list)==0: + return [] + need_remove_list = [] + for exist_finish_pid in exist_finish_pid_list: + if exist_finish_pid!=finish_pid: + exist_finish_pcd_path= os.path.join(bounds_compact_out_dir, exist_finish_pid) + exist_finish_pcd= o3d.io.read_point_cloud(exist_finish_pcd_path) + min_distance = compute_distance(finish_pcd, exist_finish_pcd) + print(f"{exist_finish_pid}与点云的最小距离---阈值{collision_threshold}---: {min_distance}") + if min_distance < collision_threshold: + # print(f"发生碰撞! 
最小距离: {min_distance}") + need_remove_list.append(exist_finish_pid) + print(f"需要删除的list::{need_remove_list}") + return need_remove_list + + +def filter_by_distance(points, original_center, threshold): + """ + 过滤掉距离 original_center 超过 threshold 的点。 + + :param points: N x 3 的 numpy 数组 + :param original_center: 1 x 3 的 numpy 数组或列表 + :param threshold: 距离阈值 + :return: 过滤后的点(仍是 N x 3 的 numpy 数组) + """ + points = np.array(points) + original_center = np.array(original_center) + + # 计算所有点到原始中心的欧几里得距离 + distances = np.linalg.norm(points - original_center, axis=1) + print(f"移动点和原始点的距离{distances}") + # 保留距离小于等于阈值的点 + filtered_points = points[distances <= threshold] + if len(filtered_points)==0: + return points + + return filtered_points + +def compact_mode_for_min_dis(input_dir, output_dir,show_chart,move_back,placed_models,dict_unplaced,dict_bounds_fix,dict_compact,machine_size): + # 小打印机380*345, 大打印机600*500*300 + y_step=1 + x_step=1 + delta = 10 + #""" + edge_x_min=-380 + delta + edge_y_min=-345 + delta + edge_x_max=0 + edge_y_max=0 + #""" + """ + edge_x_min=0 + delta + edge_y_min=0 + delta + edge_x_max=machine_size[0] + edge_y_max=machine_size[1] + #""" + collision_threshold=2 + move_last = True + # 清理输出目录 + if not os.path.exists(output_dir): + os.makedirs(output_dir) + for f in os.listdir(output_dir): + os.remove(os.path.join(output_dir, f)) + + pcd_all = [] + pcd_processed = [] + pcd_processed_x_top = [] + pcd_processed_no_x_top = [] + name_list = [] + model_list = [] + last_pcd_list = [] + last_name_list = [] + last_pcd_processed = [] + max_x = 0 + min_x = 9999 + max_delta_x = 0 + x_top_delta = 1 + border_delta = 4 + + pcd_first= [] + pcd_second= [] + for model in placed_models: + ply_origin_path = os.path.join(input_dir,model['name']) + # pcd = o3d.io.read_point_cloud(ply_origin_path) + pcd = dict_bounds_fix[model['name']] + pcd_all.append(pcd) + + if (get_axis_aligned_bbox(pcd)['y_min']>edge_y_max*0.3): + # if (True): + pcd_first.append(pcd) + print("add pcd_first", model['name']) + else: + pcd_second.append(pcd) + print("add pcd_second", model['name']) + last_name_list.append(model['name']) + + name_list.append(model['name']) + model_list.append(model) + dx = model['dimensions'][0] + x = model['position'][0] + + # if (x<=x_top_delta) : + if (x>=edge_x_max-x_top_delta) : + pcd_processed_x_top.append(pcd) + if dx > max_x: + max_x = dx + if dx < min_x: + min_x = dx + max_delta_x = max_x - min_x + + # print("compact_mode_for_min_dis", model, max_delta_x) + + draw_down = True + if max_delta_x < 10: + draw_down = False + #move_last = False + + # for idx, pcd in enumerate(pcd_all): + for idx, pcd in enumerate(pcd_first): + x = model_list[idx]['position'][0] + y = model_list[idx]['position'][1] + dx = model_list[idx]['dimensions'][0] + # print("compact_mode", name_list[idx], dx, x) + dist_x = 50 + dist_y = 20 + is_x_top = False + if x - 10 < edge_x_min: + dist_x = x - edge_x_min + if y - 10 < edge_y_min: + dist_y = y - edge_y_min + # if (x>x_top_delta) : + if (x 80: + y_init_big = 10 + x_init_big = y_init_big - 1 + """ + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['y_max'] >= edge_y_max - collision_threshold: + pcd.translate([-x_step_big, -y_step_big, 0]) + print("compact_mode y_max", idx, bbox['y_max'], edge_y_max - collision_threshold_big) + break + if bbox['x_max'] >= edge_x_max - collision_threshold: + pcd.translate([-x_step_big, -y_step_big, 0]) + print("compact_mode x_max", idx, bbox['x_max'], edge_x_max - collision_threshold_big) + break + if check_collision_all(pcd, 
pcd_processed_curr,collision_threshold_big): + pcd.translate([-x_step_big, -y_step_big, 0]) + break + pcd.translate([x_step_big, y_step_big, 0]) + #""" + #""" + while True: + bbox = get_axis_aligned_bbox(pcd) + # print("x_max",bbox['x_max'],bbox['x_min'],bbox['y_max'],bbox['y_min']) + if bbox['y_min'] <= edge_y_min + collision_threshold_big and False: + pcd.translate([0, y_step_big, 0]) + break + if bbox['y_max'] >= edge_y_max - collision_threshold_big: + pcd.translate([0, -y_step_big, 0]) + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_big+y_init_big): #5 + pcd.translate([0, -y_step_big, 0]) + break + pcd.translate([0, y_step_big, 0]) + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_min'] <= edge_x_min + collision_threshold_big and False: + pcd.translate([x_step_big, 0, 0]) + break + if bbox['x_max'] >= edge_x_max - collision_threshold_big: + pcd.translate([-x_step_big, 0, 0]) + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_big+x_init_big): + pcd.translate([-x_step_big, 0, 0]) + break + pcd.translate([x_step_big, 0, 0]) + #""" + #""" + collision_threshold_init = collision_threshold+6 + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['y_min'] <= edge_y_min + collision_threshold_init and False: + pcd.translate([0, y_step, 0]) + break + # if bbox['y_max'] >= edge_y_max - collision_threshold_init: + if bbox['y_max'] >= edge_y_max - border_delta: + pcd.translate([0, -y_step, 0]) + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_init+1): #5 + pcd.translate([0, -y_step, 0]) + break + pcd.translate([0, y_step, 0]) + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_min'] <= edge_x_min + collision_threshold_init and False: + pcd.translate([x_step, 0, 0]) + break + # if bbox['x_max'] >= edge_x_max - collision_threshold_init: + if bbox['x_max'] >= edge_x_max - border_delta: + pcd.translate([-x_step, 0, 0]) + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_init): + pcd.translate([-x_step, 0, 0]) + break + pcd.translate([x_step, 0, 0]) + #""" + #""" + collision_threshold_init = collision_threshold+2 + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['y_min'] <= edge_y_min + collision_threshold_init and False: + pcd.translate([0, y_step, 0]) + break + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step, 0]) + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_init+1): #5 + pcd.translate([0, -y_step, 0]) + break + pcd.translate([0, y_step, 0]) + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_min'] <= edge_x_min + collision_threshold_init and False: + pcd.translate([x_step, 0, 0]) + break + if bbox['x_max'] >= edge_x_max - collision_threshold_init: + pcd.translate([-x_step, 0, 0]) + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_init): + pcd.translate([-x_step, 0, 0]) + break + pcd.translate([x_step, 0, 0]) + # place again + collision_threshold_init = collision_threshold+1 + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['y_min'] <= edge_y_min + collision_threshold_init and False: + pcd.translate([0, y_step, 0]) + break + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step, 0]) + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_init): #5 + pcd.translate([0, -y_step, 0]) + break + pcd.translate([0, y_step, 0]) + #""" + pcd_processed.append(pcd) + pcd_processed_x_top.append(pcd) + if 
not is_x_top: + pcd_processed_no_x_top.append(pcd) + + cross_border = False + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_min'] <= edge_x_min + 1 or bbox['y_min'] <= edge_y_min + 1: + cross_border = True + print("coross_border",name_list[idx]) + + print(name_list[idx],"dx",dx,"y_min",get_axis_aligned_bbox(pcd)['y_min'],cross_border) + # if (dx <= 120 and get_axis_aligned_bbox(pcd)['y_min'] < -250 and move_last) or cross_border: + # last_name_list.append(name_list[idx]) + # last_pcd_list.append(pcd) + # print("last_pcd_list",name_list[idx],"dx",dx,"y_min",get_axis_aligned_bbox(pcd)['y_min']) + # else: + # # o3d.io.write_point_cloud(os.path.join(output_dir, name_list[idx]), pcd) + # dict_compact[name_list[idx]] = pcd + # last_pcd_processed.append(pcd) + + if cross_border: + pcd_second.append(pcd) + last_name_list.append(name_list[idx]) + print("cross_border", name_list[idx]) + else: + print("Add dict_compact", name_list[idx]) + dict_compact[name_list[idx]] = pcd + last_pcd_processed.append(pcd) + + volumes = [] + # for idx, pcd in enumerate(last_pcd_list): + for idx, pcd in enumerate(pcd_second): + bbox = get_axis_aligned_bbox(pcd) + + x_length = bbox['x_max'] - bbox['x_min'] + y_length = bbox['y_max'] - bbox['y_min'] + z_length = bbox['z_max'] - bbox['z_min'] + volume = x_length * y_length * z_length + volumes.append(volume) + + print("last_pcd_list", len(last_pcd_list), len(last_pcd_list), len(last_pcd_processed), len(pcd_all)) + sorted_indices = np.argsort(volumes)[::-1] + pcd_second2 = [pcd_second[i] for i in sorted_indices] + last_name_list2 = [last_name_list[i] for i in sorted_indices] + # print("last_pcd_list2", len(last_pcd_list2)) + + #last_pcd_list2 = last_pcd_list + #last_name_list2 = last_name_list + + for idx, pcd in enumerate(pcd_second2): + points = np.asarray(pcd.points) + min_x = np.min(points[:, 0]) + max_y = np.max(points[:, 1]) # 当前最大y值 + + tx = edge_x_min - min_x + ty = -max_y - 0.001 + + # pcd.translate((tx, ty, 0), relative=True) + + move_to_top_left(pcd, edge_x_min+2, edge_y_max-2) + + name = last_name_list2[idx] + # print("pcd_second2",name,"tx",tx,"ty",ty) + succ_move = True + y_accum = 0 + finish_move2 = False + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+1 + if bbox['y_min'] <= edge_y_min + collision_threshold_init: + print("succ_move False",name,bbox['y_min'],edge_y_min + collision_threshold_init) + succ_move = False + finish_move2 = True + """ + move_to_top_left(pcd, edge_x_min+2, edge_y_max-2) + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init2 = collision_threshold + if bbox['y_min'] <= edge_y_min + collision_threshold_init2: + print("succ_move False2",name,bbox['y_min'],edge_y_min + collision_threshold_init2) + succ_move = False + finish_move2 = True + break + if not check_collision_all(pcd, last_pcd_processed,collision_threshold_init2): + print("succ_move 2") + finish_move2 = True + break + pcd.translate([0, -y_step, 0]) + """ + + if (finish_move2): + break + else: + if not check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + print("succ_move1",name,bbox['x_max'],bbox['y_max'],len(last_pcd_processed)) + break + + pcd.translate([0, -y_step, 0]) + y_accum += y_step + if succ_move: + print("succ_move2", name) + """ + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_max'] >= edge_x_max - collision_threshold_big: + pcd.translate([-x_step_big, 0, 0]) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold_big): + 
pcd.translate([-x_step_big, 0, 0]) + break + pcd.translate([x_step_big, 0, 0]) + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['y_max'] >= edge_y_max - collision_threshold: + pcd.translate([0, -y_step, 0]) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold+1): #5 + pcd.translate([0, -y_step, 0]) + break + pcd.translate([0, y_step, 0]) + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_max'] >= edge_x_max - collision_threshold: + pcd.translate([-x_step, 0, 0]) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold): + pcd.translate([-x_step, 0, 0]) + break + pcd.translate([x_step, 0, 0]) + #""" + + #""" + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+2 + #print("Move x_step_big", name, bbox['y_max'], bbox['x_max']) + if bbox['x_max'] >= edge_x_max - collision_threshold_init: + pcd.translate([-x_accu, 0, 0]) + break + if not check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+1 + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step_big, 0]) + #print("Move y_max", name, bbox['y_max'], bbox['x_max']) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + pcd.translate([0, -y_step_big, 0]) + #print("Move y_max2", name, bbox['y_max'], bbox['x_max']) + break + pcd.translate([0, y_step_big, 0]) + #print("Move y_step_big", name, bbox['y_max'], bbox['x_max']) + else: + n = 1 + + x_accu += x_step_big + pcd.translate([x_step_big, 0, 0]) + #""" + else: + # pcd.translate((-tx, -ty+y_accum, 0), relative=True) + points = np.asarray(pcd.points) + min_x = np.min(points[:, 0]) + min_y = np.min(points[:, 1]) + tx = edge_x_min - min_x + ty = edge_y_min - min_y + pcd.translate((tx, ty, 0), relative=True) + + print("last place", name) + + #""" + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+10 + if bbox['x_max'] >= edge_x_max - collision_threshold_init: + print("fail to place",name) + break + if not check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + print("last place2",name) + break + pcd.translate([x_step_big, 0, 0]) + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+3 + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step_big, 0]) + print("last place3",name) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold_init): #5 + pcd.translate([0, -y_step_big, 0]) + print("last place4",name,collision_threshold_init,len(last_pcd_processed)) + break + pcd.translate([0, y_step_big, 0]) + print("last place41",y_step_big) + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['y_max'] >= edge_y_max - collision_threshold: + pcd.translate([0, -y_step, 0]) + print("last place5",name) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold+1): #5 + pcd.translate([0, -y_step, 0]) + print("last place6",name) + break + pcd.translate([0, y_step, 0]) + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_max'] >= edge_x_max - collision_threshold: + pcd.translate([-x_step, 0, 0]) + print("last place7",name) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold): + pcd.translate([-x_step, 0, 0]) + print("last place8",name) + break + pcd.translate([x_step, 0, 0]) + #""" + + """ 
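+            # NOTE: the block below is wrapped in a triple-quoted string, i.e. it is a
+            # disabled duplicate of the rightward big-step retry scan; the live copy
+            # that actually runs follows right after the string is closed.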
+ can_place_last = False + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+2 + if bbox['x_max'] >= edge_x_max - collision_threshold_init: + if not can_place_last: + print("fail to place",name) + dict_unplaced[name]=name + else: + pcd.translate([-x_accu, 0, 0]) + break + if not check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + can_place_last = True + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+1 + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step_big, 0]) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + pcd.translate([0, -y_step_big, 0]) + break + pcd.translate([0, y_step_big, 0]) + #print("Move2 y_step_big", name) + else: + n = 1 + + x_accu += x_step_big + pcd.translate([x_step_big, 0, 0]) + #""" + #""" + can_place_last = False + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+2 + if bbox['x_max'] >= edge_x_max - collision_threshold_init: + if not can_place_last: + print("fail to place",name) + dict_unplaced[name]=name + else: + pcd.translate([-x_accu, 0, 0]) + break + if not check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + can_place_last = True + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+1 + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step_big, 0]) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + pcd.translate([0, -y_step_big, 0]) + break + pcd.translate([0, y_step_big, 0]) + #print("Move2 y_step_big", name) + else: + n = 1 + + x_accu += x_step_big + pcd.translate([x_step_big, 0, 0]) + #""" + + last_pcd_processed.append(pcd) + """ + print("is_x_top",is_x_top) + if not is_x_top: + last_pcd_processed.append(pcd) + print("last_pcd_processed.append",name) + else: + print("fail last_pcd_processed.append",name, is_x_top) + """ + # o3d.io.write_point_cloud(os.path.join(output_dir, name), pcd) + dict_compact[name] = pcd + + print("2Add dict_compact", name) + +def compact_mode_for_min_dis1_json(input_dir,output_dir,show_chart,move_back,placed_models,dict_unplaced,dict_bounds_fix,dict_compact,machine_size,dict_total_matrix): + # 小打印机380*345*250, 大打印机600*500*300 + y_step=1 + x_step=1 + delta = 10 + + edge_x_min=0 + delta + edge_y_min=0 + delta + edge_x_max=machine_size[0] + edge_y_max=machine_size[1] + + collision_threshold=2 + move_last = True + + # 清理输出目录 + if not os.path.exists(output_dir): + os.makedirs(output_dir) + for f in os.listdir(output_dir): + os.remove(os.path.join(output_dir, f)) + + pcd_all = [] + pcd_processed = [] + pcd_processed_x_top = [] + pcd_processed_no_x_top = [] + # name_list = [] + dict_name = {} + # model_list = [] + dict_model = {} + last_pcd_list = [] + # last_name_list = [] + dic_last_name = {} + last_pcd_processed = [] + max_x = machine_size[0] + min_x = 0 + max_delta_x = 0 + x_top_delta = 1 + border_delta = 4 + + pcd_first= [] + pcd_second= [] + + index = 0 + for model in placed_models: + pcd = dict_bounds_fix[model['name']] + + num_samples = min(1500, len(pcd.points)) + + pcd_all.append(pcd) + + if (get_axis_aligned_bbox(pcd)['y_min']>edge_y_max*0.3 or True): + pcd_first.append(pcd) + else: + pcd_second.append(pcd) + + # pcd_all.append(pcd_downsampled) + # name_list.append(model['name']) + # model_list.append(model) + 
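+        # Index the placement metadata by the point-cloud object itself instead of by
+        # list position, so the later loops over pcd_first / pcd_second can look up the
+        # name and model record directly via dict_name[pcd] / dict_model[pcd].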
dict_name[pcd] = model['name'] + dict_model[pcd] = model + dx = model['dimensions'][0] + x = model['position'][0] + + if (x>=edge_x_max-x_top_delta) : + pcd_processed_x_top.append(pcd) + print("pcd_processed_x_top", model['name']) + if dx > max_x: + max_x = dx + if dx < min_x: + min_x = dx + max_delta_x = max_x - min_x + + index += 1 + + # print("compact_mode_for_min_dis1_json", model, max_delta_x) + + draw_down = False + if max_delta_x < 10: + draw_down = False + + # for idx, pcd in enumerate(pcd_all): + for idx, pcd in enumerate(pcd_first): + + if dict_model[pcd]['first_line']: + pcd_processed.append(pcd) + last_pcd_processed.append(pcd) + continue + + x = dict_model[pcd]['position'][0] + y = dict_model[pcd]['position'][1] + dx = dict_model[pcd]['dimensions'][0] + print("compact_mode", dict_name[pcd], dx, x) + + ply_file_name = dict_name[pcd] + obj_name = ply_file_name.split("=")[0]+".obj" + + T_trans1 = np.eye(4) + + dist_x = 50 + dist_y = 20 + is_x_top = False + if x - 10 < edge_x_min: + dist_x = x - edge_x_min + if y - 10 < edge_y_min: + dist_y = y - edge_y_min + if (x 80: + y_init_big = 10 + x_init_big = y_init_big - 1 + + if check_collision_all(pcd, pcd_processed_curr, 1): + while True: + step = 25 + pcd.translate([0, -step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -step, 0] + T_trans1 = T_transTemp @ T_trans1 + # pcd.translate([-step, 0, 0]) + # T_transTemp[:3, 3] = [-step, 0, 0] + # T_trans1 = T_transTemp @ T_trans1 + + if not check_collision_all(pcd, pcd_processed_curr, collision_threshold_big): + break + + """ + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['y_max'] >= edge_y_max - collision_threshold: + pcd.translate([-x_step_big, -y_step_big, 0]) + print("compact_mode y_max", idx, bbox['y_max'], edge_y_max - collision_threshold_big) + break + if bbox['x_max'] >= edge_x_max - collision_threshold: + pcd.translate([-x_step_big, -y_step_big, 0]) + print("compact_mode x_max", idx, bbox['x_max'], edge_x_max - collision_threshold_big) + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_big): + pcd.translate([-x_step_big, -y_step_big, 0]) + break + pcd.translate([x_step_big, y_step_big, 0]) + #""" + #""" + while True: + bbox = get_axis_aligned_bbox(pcd) + # print("x_max",bbox['x_max'],bbox['x_min'],bbox['y_max'],bbox['y_min']) + if bbox['y_min'] <= edge_y_min + collision_threshold_big and False: + pcd.translate([0, y_step_big, 0]) + break + if bbox['y_max'] >= edge_y_max - collision_threshold_big: + pcd.translate([0, -y_step_big, 0]) + + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_big+y_init_big): #5 + pcd.translate([0, -y_step_big, 0]) + + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + + break + pcd.translate([0, y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_min'] <= edge_x_min + collision_threshold_big and False: + pcd.translate([x_step_big, 0, 0]) + break + if bbox['x_max'] >= edge_x_max - collision_threshold_big: + pcd.translate([-x_step_big, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_step_big, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_big+x_init_big): + pcd.translate([-x_step_big, 0, 0]) + T_transTemp = 
np.eye(4) + T_transTemp[:3, 3] = [-x_step_big, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + pcd.translate([x_step_big, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [x_step_big, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + #""" + #""" + collision_threshold_init = collision_threshold+6 + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['y_min'] <= edge_y_min + collision_threshold_init and False: + pcd.translate([0, y_step, 0]) + break + # if bbox['y_max'] >= edge_y_max - collision_threshold_init: + if bbox['y_max'] >= edge_y_max - border_delta: + pcd.translate([0, -y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_init+1): #5 + pcd.translate([0, -y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + break + pcd.translate([0, y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + #""" + #""" + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_min'] <= edge_x_min + collision_threshold_init and False: + pcd.translate([x_step, 0, 0]) + break + # if bbox['x_max'] >= edge_x_max - collision_threshold_init: + if bbox['x_max'] >= edge_x_max - border_delta: + # print("1pcd.translate([-x_step, 0, 0])",name_list[idx]) + pcd.translate([-x_step, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_step, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_init): + # print("2pcd.translate([-x_step, 0, 0])",name_list[idx]) + pcd.translate([-x_step, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_step, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + pcd.translate([x_step, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [x_step, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + #""" + #""" + collision_threshold_init = collision_threshold+2 + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['y_min'] <= edge_y_min + collision_threshold_init and False: + pcd.translate([0, y_step, 0]) + break + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_init+1): #5 + pcd.translate([0, -y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + break + pcd.translate([0, y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_min'] <= edge_x_min + collision_threshold_init and False: + pcd.translate([x_step, 0, 0]) + break + if bbox['x_max'] >= edge_x_max - collision_threshold_init: + pcd.translate([-x_step, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_step, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_init): + pcd.translate([-x_step, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_step, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + pcd.translate([x_step, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [x_step, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + # place again + collision_threshold_init = collision_threshold+1 + while True: + 
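+            # Final fine pass ("place again"): nudge +y one step at a time until the part
+            # would cross the top edge or come within collision_threshold_init of an
+            # already-placed part, then step back once; each move is also composed into
+            # T_trans1 so the accumulated transform matches the point cloud.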
bbox = get_axis_aligned_bbox(pcd) + if bbox['y_min'] <= edge_y_min + collision_threshold_init and False: + pcd.translate([0, y_step, 0]) + break + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_init): #5 + pcd.translate([0, -y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + break + pcd.translate([0, y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + #""" + pcd_processed.append(pcd) + pcd_processed_x_top.append(pcd) + if not is_x_top: + pcd_processed_no_x_top.append(pcd) + + cross_border = False + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_min'] <= edge_x_min + 1 or bbox['y_min'] <= edge_y_min + 1: + cross_border = True + print("coross_border",ply_file_name) + + # print(ply_file_name,"dx",dx,"y_min",get_axis_aligned_bbox(pcd)['y_min'],cross_border) + # if (get_axis_aligned_bbox(pcd)['y_min']= edge_x_max - collision_threshold_big: + pcd.translate([-x_step_big, 0, 0]) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold_big): + pcd.translate([-x_step_big, 0, 0]) + break + pcd.translate([x_step_big, 0, 0]) + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['y_max'] >= edge_y_max - collision_threshold: + pcd.translate([0, -y_step, 0]) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold+1): #5 + pcd.translate([0, -y_step, 0]) + break + pcd.translate([0, y_step, 0]) + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_max'] >= edge_x_max - collision_threshold: + pcd.translate([-x_step, 0, 0]) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold): + pcd.translate([-x_step, 0, 0]) + break + pcd.translate([x_step, 0, 0]) + #""" + + #""" + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+2 + #print("Move x_step_big", name, bbox['y_max'], bbox['x_max']) + if bbox['x_max'] >= edge_x_max - collision_threshold_init: + pcd.translate([-x_accu, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_accu, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if not check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+1 + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + #print("Move y_max", name, bbox['y_max'], bbox['x_max']) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + pcd.translate([0, -y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + #print("Move y_max2", name, bbox['y_max'], bbox['x_max']) + break + pcd.translate([0, y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + #print("Move y_step_big", name, bbox['y_max'], bbox['x_max']) + else: + n = 1 + + x_accu += x_step_big + pcd.translate([x_step_big, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [x_step_big, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + #""" + else: + # T_transTemp = 
move_to_bottom_left(pcd, edge_x_min, edge_y_min) + T_transTemp = move_to_bottom_right(pcd, edge_x_max, edge_y_min) + T_trans1 = T_transTemp @ T_trans1 + + print("last place", name) + + """ + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+10 + if bbox['x_max'] >= edge_x_max - collision_threshold_init: + print("fail to place",name) + break + if not check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + print("last place2",name) + break + pcd.translate([x_step_big, 0, 0]) + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+3 + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step_big, 0]) + print("last place3",name) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold_init): #5 + pcd.translate([0, -y_step_big, 0]) + print("last place4",name,collision_threshold_init,len(last_pcd_processed)) + break + pcd.translate([0, y_step_big, 0]) + print("last place41",y_step_big) + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['y_max'] >= edge_y_max - collision_threshold: + pcd.translate([0, -y_step, 0]) + print("last place5",name) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold+1): #5 + pcd.translate([0, -y_step, 0]) + print("last place6",name) + break + pcd.translate([0, y_step, 0]) + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_max'] >= edge_x_max - collision_threshold: + pcd.translate([-x_step, 0, 0]) + print("last place7",name) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold): + pcd.translate([-x_step, 0, 0]) + print("last place8",name) + break + pcd.translate([x_step, 0, 0]) + #""" + + """ + can_place_last = False + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+2 + if bbox['x_max'] >= edge_x_max - collision_threshold_init: + if not can_place_last: + print("fail to place",name) + dict_unplaced[name]=name + else: + pcd.translate([-x_accu, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_accu, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if not check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + can_place_last = True + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+1 + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + pcd.translate([0, -y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + break + pcd.translate([0, y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + #print("Move2 y_step_big", name) + else: + n = 1 + + x_accu += x_step_big + pcd.translate([x_step_big, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [x_step_big, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + #""" + + #""" + can_place_last = False + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+2 + if bbox['x_max'] >= edge_x_max - collision_threshold_init: + if not can_place_last: + print("fail to place",name) + dict_unplaced[name]=name + else: + pcd.translate([-x_accu, 0, 0]) + 
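+                        # Scan stopped at the right edge: undo the accumulated x offset so the
+                        # part returns to the last collision-free position, and mirror that shift
+                        # into T_trans1 so dict_total_matrix stays consistent with the cloud.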
T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_accu, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if not check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + can_place_last = True + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+1 + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + pcd.translate([0, -y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + break + pcd.translate([0, y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + #print("Move2 y_step_big", name) + else: + n = 1 + + x_accu += x_step_big + pcd.translate([x_step_big, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [x_step_big, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + #""" + + """ + can_place_last = False + x_accu = 0 + place_first = True + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+2 + if bbox['x_min'] <= edge_x_min + collision_threshold_init: + if not can_place_last: + print("fail to place",name) + dict_unplaced[name]=name + else: + pcd.translate([+x_accu, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [+x_accu, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + place_first = True + print("place_first True") + break + can_break = False + if not check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + can_place_last = True + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+1 + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + can_break = True + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + pcd.translate([0, -y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + if place_first: + can_break = True + break + pcd.translate([0, y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + #print("Move2 y_step_big", name) + else: + n = 1 + + if can_break: + break + + x_accu += x_step_big + pcd.translate([-x_step_big, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_step_big, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + #""" + + last_pcd_processed.append(pcd) + """ + print("is_x_top",is_x_top) + if not is_x_top: + last_pcd_processed.append(pcd) + print("last_pcd_processed.append",name) + else: + print("fail last_pcd_processed.append",name, is_x_top) + """ + # o3d.io.write_point_cloud(os.path.join(output_dir, name), pcd) + dict_compact[name] = pcd + + dict_total_matrix[obj_name]= T_trans1 @ dict_total_matrix[obj_name] + +def compact_mode_for_min_dis2_json(input_dir,output_dir,show_chart,move_back,placed_models,dict_unplaced,dict_bounds_fix,dict_compact,machine_size,dict_total_matrix): + # 小打印机380*345, 大打印机600*500*300 + y_step=1 + x_step=1 + delta = 10 + + edge_x_min=0 + delta + edge_y_min=0 + delta + edge_x_max=machine_size[0] + edge_y_max=machine_size[1] + + collision_threshold=2 + # move_last = True + 
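+    # With move_last disabled, only parts whose bounding box ends up crossing the
+    # tray border after compaction are re-queued into last_pcd_list for the
+    # top-left re-placement pass at the end of this function.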
move_last = False + + # 清理输出目录 + if not os.path.exists(output_dir): + os.makedirs(output_dir) + for f in os.listdir(output_dir): + os.remove(os.path.join(output_dir, f)) + + pcd_all = [] + pcd_processed = [] + pcd_processed_x_top = [] + pcd_processed_no_x_top = [] + # name_list = [] + dict_name = {} + # model_list = [] + dict_model = {} + last_pcd_list = [] + # last_name_list = [] + dic_last_name = {} + last_pcd_processed = [] + max_x = machine_size[0] + min_x = 0 + max_delta_x = 0 + x_top_delta = 1 + border_delta = 4 + for model in placed_models: + pcd = dict_bounds_fix[model['name']] + + pcd_all.append(pcd) + + dict_name[pcd] = model['name'] + dict_model[pcd] = model + dx = model['dimensions'][0] + x = model['position'][0] + + if (x>=edge_x_max-x_top_delta) : + pcd_processed_x_top.append(pcd) + print("pcd_processed_x_top", model['name']) + if dx > max_x: + max_x = dx + if dx < min_x: + min_x = dx + max_delta_x = max_x - min_x + + # print("compact_mode_for_min_dis2_json", model, max_delta_x) + + # draw_down = True + draw_down = False + if max_delta_x < 10: + draw_down = False + + for idx, pcd in enumerate(pcd_all): + x = dict_model[pcd]['position'][0] + y = dict_model[pcd]['position'][1] + dx = dict_model[pcd]['dimensions'][0] + # print("compact_mode", dict_name[pcd], dx, x) + + ply_file_name = dict_name[pcd] + obj_name = ply_file_name.split("=")[0]+".obj" + + T_trans1 = np.eye(4) + + dist_x = 50 + dist_y = 20 + is_x_top = False + if x - 10 < edge_x_min: + dist_x = x - edge_x_min + if y - 10 < edge_y_min: + dist_y = y - edge_y_min + if (x 80: + y_init_big = 10 + x_init_big = y_init_big - 1 + #""" + while True: + bbox = get_axis_aligned_bbox(pcd) + # print("x_max",bbox['x_max'],bbox['x_min'],bbox['y_max'],bbox['y_min']) + if bbox['y_min'] <= edge_y_min + collision_threshold_big and False: + pcd.translate([0, y_step_big, 0]) + break + if bbox['y_max'] >= edge_y_max - collision_threshold_big: + pcd.translate([0, -y_step_big, 0]) + + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_big+y_init_big): #5 + pcd.translate([0, -y_step_big, 0]) + + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + + break + pcd.translate([0, y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_min'] <= edge_x_min + collision_threshold_big and False: + pcd.translate([x_step_big, 0, 0]) + break + if bbox['x_max'] >= edge_x_max - collision_threshold_big: + pcd.translate([-x_step_big, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_step_big, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_big+x_init_big): + pcd.translate([-x_step_big, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_step_big, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + pcd.translate([x_step_big, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [x_step_big, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + #""" + #""" + collision_threshold_init = collision_threshold+6 + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['y_min'] <= edge_y_min + collision_threshold_init and False: + pcd.translate([0, y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + break + # if 
bbox['y_max'] >= edge_y_max - collision_threshold_init: + if bbox['y_max'] >= edge_y_max - border_delta: + pcd.translate([0, -y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_init+1): #5 + pcd.translate([0, -y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + break + pcd.translate([0, y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + #""" + #""" + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_min'] <= edge_x_min + collision_threshold_init and False: + pcd.translate([x_step, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [x_step, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + # if bbox['x_max'] >= edge_x_max - collision_threshold_init: + if bbox['x_max'] >= edge_x_max - border_delta: + # print("1pcd.translate([-x_step, 0, 0])",name_list[idx]) + pcd.translate([-x_step, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_step, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_init): + # print("2pcd.translate([-x_step, 0, 0])",name_list[idx]) + pcd.translate([-x_step, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_step, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + pcd.translate([x_step, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [x_step, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + #""" + #""" + collision_threshold_init = collision_threshold+2 + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['y_min'] <= edge_y_min + collision_threshold_init and False: + pcd.translate([0, y_step, 0]) + break + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_init+1): #5 + pcd.translate([0, -y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + break + pcd.translate([0, y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_min'] <= edge_x_min + collision_threshold_init and False: + pcd.translate([x_step, 0, 0]) + break + if bbox['x_max'] >= edge_x_max - collision_threshold_init: + pcd.translate([-x_step, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_step, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if check_collision_all(pcd, pcd_processed_curr,collision_threshold_init): + pcd.translate([-x_step, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_step, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + pcd.translate([x_step, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [x_step, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + # place again + collision_threshold_init = collision_threshold+1 + while True: + bbox = get_axis_aligned_bbox(pcd) + if bbox['y_min'] <= edge_y_min + collision_threshold_init and False: + pcd.translate([0, y_step, 0]) + break + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if 
check_collision_all(pcd, pcd_processed_curr,collision_threshold_init): #5 + pcd.translate([0, -y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + break + pcd.translate([0, y_step, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + #""" + pcd_processed.append(pcd) + pcd_processed_x_top.append(pcd) + if not is_x_top: + pcd_processed_no_x_top.append(pcd) + + cross_border = False + bbox = get_axis_aligned_bbox(pcd) + if bbox['x_min'] <= edge_x_min + 1 or bbox['y_min'] <= edge_y_min + 1: + cross_border = True + print("coross_border",ply_file_name) + + print(ply_file_name,"dx",dx,"y_min",get_axis_aligned_bbox(pcd)['y_min'],cross_border) + if (dx <= 120 and get_axis_aligned_bbox(pcd)['y_min'] < 250 and move_last) or cross_border: + last_pcd_list.append(pcd) + dic_last_name[pcd] = ply_file_name + print("last_pcd_list",ply_file_name,"dx",dx,"y_min",get_axis_aligned_bbox(pcd)['y_min']) + else: + # o3d.io.write_point_cloud(os.path.join(output_dir, name_list[idx]), pcd) + dict_compact[ply_file_name] = pcd + last_pcd_processed.append(pcd) + + dict_total_matrix[obj_name]= T_trans1 @ dict_total_matrix[obj_name] + + volumes = [] + for idx, pcd in enumerate(last_pcd_list): + bbox = get_axis_aligned_bbox(pcd) + + x_length = bbox['x_max'] - bbox['x_min'] + y_length = bbox['y_max'] - bbox['y_min'] + z_length = bbox['z_max'] - bbox['z_min'] + volume = x_length * y_length * z_length + volumes.append(volume) + + print(f"last_pcd_list len(last_pcd_list)={len(last_pcd_list)},len(last_pcd_processed)={len(last_pcd_processed)},len(pcd_all)={len(pcd_all)}") + sorted_indices = np.argsort(volumes)[::-1] + last_pcd_list2 = [last_pcd_list[i] for i in sorted_indices] + # last_name_list2 = [last_name_list[i] for i in sorted_indices] + print("last_pcd_list2", len(last_pcd_list2)) + + for idx, pcd in enumerate(last_pcd_list2): + + ply_file_name = dict_name[pcd] + obj_name = ply_file_name.split("=")[0]+".obj" + + print("last_pcd_list2", obj_name) + + T_trans1 = np.eye(4) + + points = np.asarray(pcd.points) + min_x = np.min(points[:, 0]) + max_y = np.max(points[:, 1]) # 当前最大y值 + + tx = edge_x_min - min_x + ty = -max_y - 0.001 + + T_transTemp = move_to_top_left(pcd, edge_x_min+2, edge_y_max-2) + + T_trans1 = T_transTemp @ T_trans1 + + name = dict_name[pcd] + # print("last_pcd_list2",name,"tx",tx,"ty",ty) + succ_move = True + y_accum = 0 + finish_move2 = False + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+1 + if bbox['y_min'] <= edge_y_min + collision_threshold_init: + print("succ_move False",name,bbox['y_min'],edge_y_min + collision_threshold_init) + succ_move = False + finish_move2 = True + + if (finish_move2): + break + else: + if not check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + print("succ_move1",name,bbox['x_max'],bbox['y_max'],len(last_pcd_processed)) + break + + pcd.translate([0, -y_step, 0]) + + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step, 0] + T_trans1 = T_transTemp @ T_trans1 + + y_accum += y_step + + if succ_move: + #print("succ_move2", name) + #""" + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+2 + #print("Move x_step_big", name, bbox['y_max'], bbox['x_max']) + if bbox['x_max'] >= edge_x_max - collision_threshold_init: + pcd.translate([-x_accu, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_accu, 0, 0] + T_trans1 = T_transTemp @ 
T_trans1 + break + if not check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+1 + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + #print("Move y_max", name, bbox['y_max'], bbox['x_max']) + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + pcd.translate([0, -y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + #print("Move y_max2", name, bbox['y_max'], bbox['x_max']) + break + pcd.translate([0, y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + #print("Move y_step_big", name, bbox['y_max'], bbox['x_max']) + else: + n = 1 + + x_accu += x_step_big + pcd.translate([x_step_big, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [x_step_big, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + #""" + else: + points = np.asarray(pcd.points) + min_x = np.min(points[:, 0]) + min_y = np.min(points[:, 1]) + tx = edge_x_min - min_x + ty = edge_y_min - min_y + pcd.translate((tx, ty, 0), relative=True) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [tx, ty, 0] + T_trans1 = T_transTemp @ T_trans1 + + print("last place", name) + + #""" + can_place_last = False + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+2 + if bbox['x_max'] >= edge_x_max - collision_threshold_init: + if not can_place_last: + print("fail to place",name) + dict_unplaced[name]=name + else: + pcd.translate([-x_accu, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [-x_accu, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if not check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + can_place_last = True + x_accu = 0 + while True: + bbox = get_axis_aligned_bbox(pcd) + collision_threshold_init = collision_threshold+1 + if bbox['y_max'] >= edge_y_max - collision_threshold_init: + pcd.translate([0, -y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + break + if check_collision_all(pcd, last_pcd_processed,collision_threshold_init): + pcd.translate([0, -y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, -y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + break + pcd.translate([0, y_step_big, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [0, y_step_big, 0] + T_trans1 = T_transTemp @ T_trans1 + else: + n = 1 + + x_accu += x_step_big + pcd.translate([x_step_big, 0, 0]) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [x_step_big, 0, 0] + T_trans1 = T_transTemp @ T_trans1 + #""" + + last_pcd_processed.append(pcd) + + dict_compact[name] = pcd + + dict_total_matrix[obj_name]= T_trans1 @ dict_total_matrix[obj_name] + +def move_to_top_left(pcd, edge_x_min, edge_y_max): + points = np.asarray(pcd.points) + min_x = np.min(points[:, 0]) + max_y = np.max(points[:, 1]) # 当前最大y值 + + tx = edge_x_min - min_x + # ty = -max_y - 0.001 + ty = edge_y_max - max_y + + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [tx, ty, 0] + + pcd.translate((tx, ty, 0), relative=True) + + return T_transTemp + +def move_to_bottom_left(pcd, edge_x_min, edge_y_min): + + points = np.asarray(pcd.points) + min_x = np.min(points[:, 0]) + min_y = 
np.min(points[:, 1]) + tx = edge_x_min - min_x + ty = edge_y_min - min_y + pcd.translate((tx, ty, 0), relative=True) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [tx, ty, 0] + + return T_transTemp + +def move_to_bottom_right(pcd, edge_x_max, edge_y_min): + + points = np.asarray(pcd.points) + max_x = np.min(points[:, 0]) + min_y = np.min(points[:, 1]) + tx = edge_x_max - max_x + ty = edge_y_min - min_y + pcd.translate((tx, ty, 0), relative=True) + T_transTemp = np.eye(4) + T_transTemp[:3, 3] = [tx, ty, 0] + + return T_transTemp + +def get_y_centroid(file_path): + pcd = o3d.io.read_point_cloud(file_path) + return np.mean(np.asarray(pcd.points)[:,1]) + +def get_axis_aligned_bbox(pcd): + points = np.asarray(pcd.points) + return { + 'x_min': np.min(points[:,0]), + 'x_max': np.max(points[:,0]), + 'y_min': np.min(points[:,1]), + 'y_max': np.max(points[:,1]), + 'z_min': np.min(points[:,2]), + 'z_max': np.max(points[:,2]) + } + +def check_x_collision(target_bbox, placed_models,collision_threshold=2): + for model in placed_models: + print(target_bbox['x_max'],target_bbox['x_min'],model['bbox']['x_max'],model['bbox']['x_min']) + if (target_bbox['x_max']+collision_threshold > model['bbox']['x_min']): + return True + return False + +def check_y_collision(target_bbox, placed_models,collision_threshold=2): + for model in placed_models: + if (target_bbox['y_max']+collision_threshold > model['bbox']['y_max']): + return True + return False + +def down_sample(pcd, voxel_size, farthest_sample = False): + original_num = len(pcd.points) + target_samples = 1500 # 1000 + num_samples = min(target_samples, original_num) + + # 第一步:使用体素下采样快速减少点数量 + # voxel_size = 3 + if farthest_sample: + pcd_voxel = pcd.farthest_point_down_sample(num_samples=num_samples) + else: + pcd_voxel = pcd.voxel_down_sample(voxel_size) + down_num = len(pcd_voxel.points) + # print(f"original_num={original_num}, down_num={down_num}") + + # 第二步:仅在必要时进行最远点下采样 + if len(pcd_voxel.points) > target_samples and False: + pcd_downsampled = pcd_voxel.farthest_point_down_sample(num_samples=num_samples) + else: + pcd_downsampled = pcd_voxel + + return pcd_downsampled + +def down_obj_data_to_ply(weight_fix_out_obj_dir,weight_fix_out_ply_dir): + """""" + obj_file_list = [aa for aa in os.listdir(weight_fix_out_obj_dir) if aa.endswith(".obj")] + for obj_name in obj_file_list: + obj_path = os.path.join(weight_fix_out_obj_dir,obj_name) + + mesh_obj = read_mesh(obj_path) + + vertices = np.asarray(mesh_obj.vertices) + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(vertices) + voxel_size = 3 # 设置体素的大小,决定下采样的密度 + pcd_downsampled = down_sample(pcd, voxel_size) + ply_out_path = os.path.join(weight_fix_out_ply_dir,obj_name.replace(".obj",".ply")) + o3d.io.write_point_cloud(ply_out_path, pcd_downsampled) + print(ply_out_path,"下采样完成。") + +def compute_centroid_compact(pcd): + points = np.asarray(pcd.points) + centroid = np.mean(points, axis=0) + return centroid + +def compute_base_point(pcd): + points = np.asarray(pcd.points) + x_center = np.mean(points[:, 0]) + y_center = np.mean(points[:, 1]) + min_z = np.min(points[:, 2]) + return np.array([x_center, y_center, min_z]) + +import copy +def move_obj_to_compact_bounds(bounds_fix_out_dir,bounds_compact_out_dir,weight_fix_out_obj_dir,base_original_obj_dir,compact_obj_out_dir,dict_mesh_obj,dict_unplaced,placed_remove_dir,dict_bad,bad_dir,full_dir,dict_bounds_fix,dict_compact,dict_origin): + """""" + # obj_file_list = [aa for aa in os.listdir(weight_fix_out_obj_dir) if aa.endswith(".obj")] + 
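+    # For each successfully placed part, translate its OBJ mesh by the centroid
+    # displacement between the "bounds_fix" cloud and the compacted cloud, save the
+    # moved OBJ next to copies of its .mtl/.jpg/.png assets, and archive the originals.
+    # The move itself is roughly equivalent to (sketch, using Open3D helpers):
+    #     displacement = pcd_compact.get_center() - pcd_fix.get_center()
+    #     mesh_obj.translate(displacement)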
obj_file_list = list(dict_mesh_obj.keys()) + ply_path_dict = {} + + # meshes = [] + # for ply_file_name in os.listdir(bounds_fix_out_dir): + for ply_file_name in dict_bounds_fix: + ply_dict_key = ply_file_name.split("=")[0] + ply_path_dict[ply_dict_key] = ply_file_name + for obj_name in obj_file_list: + obj_path = os.path.join(weight_fix_out_obj_dir, obj_name) + + # mesh_obj = read_mesh(obj_path, False) + mesh_obj = dict_mesh_obj[obj_name] + # mesh_obj = dict_origin[obj_origin_path] + + original_obj_pid_dir = base_original_obj_dir + obj_origin_path = os.path.join(original_obj_pid_dir, obj_name) + obj_origin = dict_origin[obj_origin_path] + # obj_origin = copy.deepcopy(dict_origin[obj_origin_path]) + + ply_name_pid = obj_name.replace(".obj","") + # ply_name = ply_path_dict[ply_name_pid] + ply_name = ply_path_dict.get(ply_name_pid,None) + print(ply_name_pid,ply_name) + if ply_name is None: + continue + print("move_obj_to_compact_bounds",ply_name,len(dict_unplaced)) + if not ply_name or ply_name in dict_unplaced: + print("unplaced",ply_name) + continue + ply_fix_path = os.path.join(bounds_fix_out_dir,ply_name) + ply_compact_path = os.path.join(bounds_compact_out_dir, ply_name) + # pcd_fix = o3d.io.read_point_cloud(ply_fix_path) + pcd_fix = dict_bounds_fix[ply_name] + + vertices = np.asarray(obj_origin.vertices) + pcd_origin = o3d.geometry.PointCloud() + pcd_origin.points = o3d.utility.Vector3dVector(vertices) + + # pcd_compact = o3d.io.read_point_cloud(ply_compact_path) + pcd_compact = dict_compact[ply_name] + + centroid_fix = compute_centroid_compact(pcd_fix) + centroid_compact = compute_centroid_compact(pcd_compact) + centroid_origin = compute_centroid_compact(pcd_origin) + displacement = centroid_compact - centroid_fix + # displacement = centroid_compact - centroid_origin + vertices = np.asarray(mesh_obj.vertices) + vertices_translated = vertices + displacement # 将位移应用到每个顶点 + mesh_obj.vertices = o3d.utility.Vector3dVector(vertices_translated) # 更新网格顶点 + + obj_pid = obj_name.split("_P")[0] + #compact_obj_pid_out_dir = os.path.join(compact_obj_out_dir,obj_pid) + compact_obj_pid_out_dir= compact_obj_out_dir + if not os.path.exists(compact_obj_pid_out_dir): + os.makedirs(compact_obj_pid_out_dir) + obj_path_compact = os.path.join(compact_obj_pid_out_dir,obj_name) + mesh_obj.compute_vertex_normals() + o3d.io.write_triangle_mesh(obj_path_compact, mesh_obj,write_triangle_uvs=True) + print(obj_path_compact, "移动后obj保存完成", displacement) + + # meshes.append(mesh_obj) + + #original_obj_pid_dir = os.path.join(base_original_obj_dir,obj_pid) + original_obj_pid_dir = base_original_obj_dir + for mtl in os.listdir(compact_obj_pid_out_dir): + if mtl.endswith(".mtl"): + if obj_pid in mtl: + mtl_path = os.path.join(compact_obj_pid_out_dir,mtl) + os.remove(mtl_path) + + mtl_name = None + tex_name = None + for file_name in os.listdir(original_obj_pid_dir): + if file_name.endswith(".mtl"): + if obj_pid in file_name: + mtl_name = file_name + if file_name.endswith(".jpg"): + if obj_pid in file_name: + tex_name = file_name + if file_name.endswith(".png"): + if obj_pid in file_name: + tex_name = file_name + + for file in os.listdir(original_obj_pid_dir): + #print(f"file{file}") + if file.endswith(".obj"): + continue + if obj_pid not in file: + continue + + origin_path = os.path.join(original_obj_pid_dir,file) + dis_path = os.path.join(compact_obj_pid_out_dir,file) + + if os.path.isfile(origin_path): + #print(f'origin_path{origin_path}') + #print(f'dis_path{dis_path}') + shutil.copy(origin_path,dis_path) + 
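+            # The pause below presumably just lets the asset copies settle before the
+            # original OBJ is archived to placed_remove_dir and removed further down.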
time.sleep(1) + #print("-"*50) + base_origin_obj_path = os.path.join(original_obj_pid_dir,obj_name) + #print(f"base_origin_obj_path{base_origin_obj_path}") + #print(f"obj_path_compact{obj_path_compact}") + update_obj_file(base_origin_obj_path, obj_path_compact) + + placed_remove_obj_path = os.path.join(placed_remove_dir, obj_name) + shutil.copy(base_origin_obj_path,placed_remove_obj_path) + os.remove(base_origin_obj_path) + + exist_obj_any = False + exist_obj = False + delete_mtl = False + for file_name in os.listdir(original_obj_pid_dir): + if file_name.endswith(".obj"): + if obj_pid in file_name: + exist_obj = True + exist_obj_any = True + + if not exist_obj_any: + delete_mtl = True + if not exist_obj: + delete_mtl = True + + if delete_mtl: + print("delete_mtl",mtl_name,tex_name) + if mtl_name!=None: + base_origin_mtl_path = os.path.join(original_obj_pid_dir,mtl_name) + placed_remove_mtl_path = os.path.join(placed_remove_dir, mtl_name) + shutil.copy(base_origin_mtl_path,placed_remove_mtl_path) + os.remove(base_origin_mtl_path) + + if tex_name!=None: + base_origin_tex_path = os.path.join(original_obj_pid_dir,tex_name) + placed_remove_tex_path = os.path.join(placed_remove_dir, tex_name) + shutil.copy(base_origin_tex_path,placed_remove_tex_path) + os.remove(base_origin_tex_path) + + """ + # 创建可视化窗口 + vis = o3d.visualization.Visualizer() + vis.create_window(window_name='模型展示') + + # 添加所有模型到场景 + for mesh in meshes: + vis.add_geometry(mesh) + + # 设置相机视角 + vis.get_render_option().mesh_show_back_face = True + vis.get_render_option().light_on = True + + # 运行可视化 + vis.run() + vis.destroy_window() + """ + + print(f"排版错误模型数量::{len(dict_bad)}") + for obj_name in dict_bad: + print("--错误模型名:", obj_name) + process_obj_files(original_obj_pid_dir,bad_dir,obj_name) + + print(f"排版剩余模型数量::{len(dict_unplaced)}") + for ply_file_name in dict_unplaced: + obj_name = ply_file_name.split("=")[0]+".obj" + print("--剩余模型名:", obj_name) + process_obj_files(original_obj_pid_dir,full_dir,obj_name) + +import json + +def extract_angles(R): + # 提取绕X、Y、Z轴的旋转角度(弧度) + rx_rad = np.arctan2(R[2, 1], R[2, 2]) # X轴旋转 + ry_rad = np.arcsin(-R[2, 0]) # Y轴旋转 + rz_rad = np.arctan2(R[1, 0], R[0, 0]) # Z轴旋转 + + # 将弧度转换为角度(度数) + rx_deg = np.degrees(rx_rad) + ry_deg = np.degrees(ry_rad) + rz_deg = np.degrees(rz_rad) + + return rx_deg, ry_deg, rz_deg + +def compute_mesh_center(vertices): + """ + 计算网格质心 + + 参数: + vertices: 顶点坐标数组,形状为(N, 3)的NumPy数组或列表 + + 返回: + centroid: 质心坐标的NumPy数组 [x, y, z] + """ + if len(vertices) == 0: + raise ValueError("顶点数组不能为空") + + n = len(vertices) # 顶点数量 + # 初始化坐标累加器 + sum_x, sum_y, sum_z = 0.0, 0.0, 0.0 + + # 遍历所有顶点累加坐标值 + for vertex in vertices: + sum_x += vertex[0] + sum_y += vertex[1] + sum_z += vertex[2] + + # 计算各坐标轴的平均值 + centroid = np.array([sum_x / n, sum_y / n, sum_z / n]) + return centroid + +import re +def extract_numbers_from_filename(filename): + """ + 从文件名中提取893333, 338908, 105043和x后面的数字 + """ + # 提取前两个下划线前的数字 + first_part = re.findall(r'^(\d+)_(\d+)', filename) + if first_part: + num1, num2 = first_part[0] + else: + num1, num2 = None, None + + # 提取P后面的数字 + p_number = re.findall(r'P(\d+)', filename) + num3 = p_number[0] if p_number else None + + # 提取x后面的数字 + x_number = re.findall(r'x(\d+)', filename) + num4 = x_number[0] if x_number else None + + return [num for num in [num1, num2, num3, num4] if num is not None] + +import requests +def move_obj_to_compact_bounds_json(bounds_fix_out_dir, bounds_compact_out_dir, weight_fix_out_obj_dir, + base_original_obj_dir, compact_obj_out_dir, 
dict_mesh_obj,dict_unplaced, + placed_remove_dir, dict_bad, bad_dir, full_dir,dict_best_angel,dict_bounds_fix, + dict_compact,dict_origin,dict_total_matrix,save_mesh,cache_type_setting_dir,batch_id, print_start_time,selected_machine,selected_mode,version): + """生成3D打印布局的JSON数据并保存为3DPrintLayout.json""" + # 创建符合3DPrintLayout规范的JSON数据结构 + layout_data = { + "summary": { + "version": version, + "homo_matrix": "Homogeneous Matrix", + "precision": 6, + "selected_machine": selected_machine, + "selected_mode": selected_mode + }, + "models": [] + } + + send_layout_data={ + "data": [], + "pre_complate_time": 0.0, + "pre_batch_id": batch_id, + "type_setting_start_time": print_start_time + } + + print("is_test=", is_test) + if is_test: + is_send_layout_data = False + else: + is_send_layout_data = True + # is_send_layout_data = False + + obj_file_list = list(dict_mesh_obj.keys()) + ply_path_dict = {} + meshes = [] + + original_obj_pid_dir = base_original_obj_dir + + # 构建PLY文件路径映射 + # for ply_file_name in os.listdir(bounds_fix_out_dir): + for ply_file_name in dict_bounds_fix: + ply_dict_key = ply_file_name.split("=")[0] + ply_path_dict[ply_dict_key] = ply_file_name + + for obj_name in obj_file_list: + ply_name_pid = obj_name.replace(".obj", "") + ply_name = ply_path_dict.get(ply_name_pid, None) + + if is_send_layout_data: + result = extract_numbers_from_filename(ply_name) + + if not ply_name or ply_name in dict_unplaced: + + if is_send_layout_data: + print_id = result[2] + order_id = result[0] + status = 0 + pid = result[1] + counts = result[3] + send_layout_data["data"].append({ + "print_id": print_id, + "order_id": order_id, + "status": status, + "pid":pid, + "counts":counts}) + + continue # 跳过未放置的模型 + + total_matrix = dict_total_matrix[obj_name] + + # if save_mesh: + # print("do save mesh here") + + flattened = total_matrix.flatten()[:16] + + matrix_4x4 = [ + [round(flattened[i], 6) for i in range(0, 4)], # 第1行 + [round(flattened[i], 6) for i in range(4, 8)], # 第2行 + [round(flattened[i], 6) for i in range(8, 12)], # 第3行 + [round(flattened[i], 6) for i in range(12, 16)] # 第4行 + ] + + layout_data["models"].append({ + "file_name": obj_name, + "transform": { + "homo_matrix": matrix_4x4 + } + }) + + if is_send_layout_data: + print_id = result[2] + order_id = result[0] + status = 1 + pid = result[1] + counts = result[3] + send_layout_data["data"].append({ + "print_id": print_id, + "order_id": order_id, + "status": status, + "pid":pid, + "counts":counts}) + + # 保存JSON文件 + # json_path = os.path.join(base_original_obj_dir, "3DPrintLayout.json") + json_path = os.path.join(base_original_obj_dir, f"{batch_id}.json") + + """ + if is_send_layout_data: + print(f"send_layout_data={send_layout_data}") + url = 'https://mp.api.suwa3d.com/api/printTypeSettingOrder/printTypeSettingOrderSuccess' + # url = 'http://127.0.0.1:8199/api/typeSettingPrintOrder/printTypeSettingOrderSuccess' + try: + response = requests.post(url, json.dumps(send_layout_data), timeout=30) + #写入文件中 log/request.txt + # with open('log/request.txt', 'w+') as f: + # f.write(json.dumps(send_layout_data, ensure_ascii=False, indent=2)) + # 检查响应状态码 + if response.status_code == 200: + try: + result = response.json() + print(f"请求成功,返回结果: {result}") + except ValueError as e: + print(f"响应不是有效的JSON格式: {e}") + print(f"响应内容: {response.text}") + else: + print(f"请求失败,状态码: {response.status_code}") + print(f"响应内容: {response.text}") + except requests.exceptions.Timeout: + print(f"请求超时: 连接 {url} 超过30秒未响应") + except requests.exceptions.ConnectionError as e: + 
print(f"连接错误: 无法连接到服务器 {url}, 错误信息: {e}") + except requests.exceptions.RequestException as e: + print(f"请求异常: {e}") + except Exception as e: + print(f"未知错误: {e}") + """ + + import re + json_str = json.dumps(layout_data, ensure_ascii=False, indent=2) + json_str = re.sub( + r'\[\s*(-?[\d.]+),\s+(-?[\d.]+),\s+(-?[\d.]+),\s+(-?[\d.]+)\s*\]', + r'[\1,\2,\3,\4]', + json_str + ) + with open(json_path, "w", encoding='utf-8') as f: + f.write(json_str) + + print(f"3D打印布局已保存至: {json_path}") + + print(f"排版错误模型数量::{len(dict_bad)}") + for obj_name in dict_bad: + print("--错误模型名:", obj_name) + process_obj_files(original_obj_pid_dir,bad_dir,obj_name) + + print(f"排版剩余模型数量::{len(dict_unplaced)}") + for ply_file_name in dict_unplaced: + obj_name = ply_file_name.split("=")[0]+".obj" + print("--剩余模型名:", obj_name) + process_obj_files(original_obj_pid_dir,full_dir,obj_name) + + if save_mesh: + transform_save(layout_data, original_obj_pid_dir, cache_type_setting_dir) + + """ + # 小打印机380*345,需要偏移-380,-345 + need_offset = True + for model in layout_data["models"]: + transform = model.get('transform', {}) + + homo_matrix = transform["homo_matrix"] # 获取存储的列表 + reconstructed_matrix = np.array(homo_matrix, dtype=np.float64) + + obj_name = model.get('file_name', '') + obj_path = os.path.join(original_obj_pid_dir, obj_name) + # 加载网格 + try: + mesh = o3d.io.read_triangle_mesh(obj_path, enable_post_processing=True) + if not mesh.has_vertices(): + print(f"警告: 网格无有效顶点 - {obj_path}") + continue + except Exception as e: + print(f"加载模型失败: {obj_path} - {e}") + continue + + original_vertices = np.asarray(mesh.vertices) + + transformed_vertices = custom_mesh_transform(original_vertices, reconstructed_matrix) + # 如果 need_offset 为 True,应用额外的偏移 + if need_offset: + # 应用偏移 (-380, -345, 0) + offset = np.array([-380, -345, 0]) + transformed_vertices += offset + print(f"已对模型 {obj_name} 应用偏移: {offset}") + + mesh.vertices = o3d.utility.Vector3dVector(transformed_vertices) + + meshes.append(mesh) + + # obj_path_arrange = os.path.join(original_obj_pid_dir, "arrange") + obj_path_arrange = cache_type_setting_dir + if not os.path.exists(obj_path_arrange): + os.mkdir(obj_path_arrange) + obj_path_arrange_obj = os.path.join(obj_path_arrange, obj_name) + print("obj_path_arrange_obj", obj_path_arrange_obj) + mesh.compute_vertex_normals() + o3d.io.write_triangle_mesh(obj_path_arrange_obj, mesh,write_triangle_uvs=True) + # """ + + return layout_data, send_layout_data + +#""" +def transform_save(layout_data, original_obj_pid_dir, cache_type_setting_dir): + print(f"original_obj_pid_dir={original_obj_pid_dir}, cache_type_setting_dir={cache_type_setting_dir}") + meshes = [] + # 小打印机380*345,需要偏移-380,-345 + need_offset = True + for model in layout_data["models"]: + transform = model.get('transform', {}) + + homo_matrix = transform["homo_matrix"] # 获取存储的列表 + reconstructed_matrix = np.array(homo_matrix, dtype=np.float64) + + obj_name = model.get('file_name', '') + obj_path = os.path.join(original_obj_pid_dir, obj_name) + # 加载网格 + try: + mesh = o3d.io.read_triangle_mesh(obj_path, enable_post_processing=True) + if not mesh.has_vertices(): + print(f"警告: 网格无有效顶点 - {obj_path}") + continue + except Exception as e: + print(f"加载模型失败: {obj_path} - {e}") + continue + + original_vertices = np.asarray(mesh.vertices) + + transformed_vertices = custom_mesh_transform(original_vertices, reconstructed_matrix) + # 如果 need_offset 为 True,应用额外的偏移 + if need_offset: + # 应用偏移 (-380, -345, 0) + offset = np.array([-380, -345, 0]) + transformed_vertices += offset + print(f"已对模型 {obj_name} 
应用偏移: {offset}") + + mesh.vertices = o3d.utility.Vector3dVector(transformed_vertices) + + meshes.append(mesh) + + # obj_path_arrange = os.path.join(original_obj_pid_dir, "arrange") + obj_path_arrange = cache_type_setting_dir + if not os.path.exists(obj_path_arrange): + os.mkdir(obj_path_arrange) + obj_path_arrange_obj = os.path.join(obj_path_arrange, obj_name) + # print("obj_path_arrange_obj", obj_path_arrange_obj) + + mesh.compute_vertex_normals() + o3d.io.write_triangle_mesh(obj_path_arrange_obj, mesh, write_triangle_uvs=True) +#""" + +#""" +def transform_save2(layout_data, original_obj_pid_dir, cache_type_setting_dir): + print(f"original_obj_pid_dir={original_obj_pid_dir}, cache_type_setting_dir={cache_type_setting_dir}") + + # 确保输出目录存在 + os.makedirs(cache_type_setting_dir, exist_ok=True) + + # 小打印机380 * 345,需要偏移-380,-345 + need_offset = True + + for model in layout_data["models"]: + transform = model.get('transform', {}) + homo_matrix = transform["homo_matrix"] + reconstructed_matrix = np.array(homo_matrix, dtype=np.float64) + + obj_name = model.get('file_name', '') + obj_path = os.path.join(original_obj_pid_dir, obj_name) + + # 1. 加载原始网格 + try: + mesh = o3d.io.read_triangle_mesh(obj_path, enable_post_processing=True) + if not mesh.has_vertices(): + print(f"警告: 网格无有效顶点 - {obj_path}") + continue + except Exception as e: + print(f"加载模型失败: {obj_path} - {e}") + continue + + # 2. 应用顶点变换 + original_vertices = np.asarray(mesh.vertices) + transformed_vertices = custom_mesh_transform(original_vertices, reconstructed_matrix) + + if need_offset: + offset = np.array([-380, -345, 0]) + transformed_vertices += offset + print(f"已对模型 {obj_name} 应用偏移: {offset}") + + mesh.vertices = o3d.utility.Vector3dVector(transformed_vertices) + + # 3. 计算顶点法线 + mesh.compute_vertex_normals() + + # 4. 构建输出路径 + obj_path_arrange_obj = os.path.join(cache_type_setting_dir, obj_name) + print("保存路径:", obj_path_arrange_obj) + + # 5. 
关键步骤:直接写入OBJ文件,只包含几何数据 + write_obj_without_new_materials(mesh, obj_path_arrange_obj, obj_path) + + post_process_obj_to_use_original_mtl(obj_path, obj_path_arrange_obj) + + print(f"模型 {obj_name} 处理完成,已保留原始材质") + +def write_obj_without_new_materials(mesh, output_obj_path, original_obj_path): + """ + 只写入几何数据(顶点、面、法线、UV),不生成新的材质文件 + """ + # 读取原始OBJ文件中的材质引用 + original_mtl_ref = get_original_mtl_reference(original_obj_path) + + # 写入新的OBJ文件 + with open(output_obj_path, 'w', encoding='utf-8') as f: + # 写入原始材质引用(如果存在) + # print(f"original_mtl_ref={original_mtl_ref}") + if original_mtl_ref: + f.write(f"mtllib {original_mtl_ref}\n") + + # 写入顶点 + vertices = np.asarray(mesh.vertices) + for v in vertices: + f.write(f"v {v[0]:.4f} {v[1]:.4f} {v[2]:.4f}\n") + + # 写入顶点法线 + if mesh.has_vertex_normals(): + normals = np.asarray(mesh.vertex_normals) + for n in normals: + f.write(f"vn {n[0]:.4f} {n[1]:.4f} {n[2]:.4f}\n") + + # 写入纹理坐标(UV) + # print(f"has_triangle_uvs={mesh.has_triangle_uvs}") + if mesh.has_triangle_uvs(): + uvs = np.asarray(mesh.triangle_uvs) + for uv in uvs: + f.write(f"vt {uv[0]:.4f} {uv[1]:.4f}\n") + + # 写入面 + triangles = np.asarray(mesh.triangles) + uv_indices = np.arange(len(triangles) * 3).reshape(-1, 3) if mesh.has_triangle_uvs() else None + + for i, tri in enumerate(triangles): + v1, v2, v3 = tri + 1 # OBJ索引从1开始 + + if mesh.has_vertex_normals() and mesh.has_triangle_uvs(): + # 顶点索引/纹理索引/法线索引 + uv1, uv2, uv3 = uv_indices[i] + 1 + f.write(f"f {v1}/{uv1}/{v1} {v2}/{uv2}/{v2} {v3}/{uv3}/{v3}\n") + elif mesh.has_vertex_normals(): + f.write(f"f {v1}//{v1} {v2}//{v2} {v3}//{v3}\n") + elif mesh.has_triangle_uvs(): + uv1, uv2, uv3 = uv_indices[i] + 1 + f.write(f"f {v1}/{uv1} {v2}/{uv2} {v3}/{uv3}\n") + else: + f.write(f"f {v1} {v2} {v3}\n") + +def get_original_mtl_reference(original_obj_path): + """ + 从原始OBJ文件中提取mtllib引用 + """ + if not os.path.exists(original_obj_path): + return None + + with open(original_obj_path, 'r', encoding='utf-8') as f: + for line in f: + if line.startswith('mtllib'): + return line.split()[1] # 返回mtllib后面的文件名 + return None + +def post_process_obj_to_use_original_mtl(original_obj_path, new_obj_path): + """ + 后处理OBJ文件,使其引用原始的MTL材质库而非新生成的。 + """ + original_mtl_name = None + original_dir = os.path.dirname(original_obj_path) + + # 读取原始OBJ文件,查找其使用的MTL材质库名称 + if os.path.exists(original_obj_path): + with open(original_obj_path, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + if line.startswith('mtllib'): + original_mtl_name = line.split()[1] # 获取mtllib后面的文件名 + break + + # 如果找到了原始MTL引用,并且该MTL文件存在,则复制它到新位置 + if original_mtl_name: + original_mtl_path = os.path.join(original_dir, original_mtl_name) + new_dir = os.path.dirname(new_obj_path) + new_mtl_path = os.path.join(new_dir, original_mtl_name) + + # 复制MTL文件 + if os.path.exists(original_mtl_path): + shutil.copy2(original_mtl_path, new_mtl_path) + print(f"已复制材质文件: {original_mtl_path} -> {new_mtl_path}") + + # 同时复制MTL中引用的所有纹理图片 + copy_textures_referenced_in_mtl(original_mtl_path, original_dir, new_dir) + + # 读取新OBJ文件内容,修改mtllib行指向原始的材质库 + if os.path.exists(new_obj_path): + with open(new_obj_path, 'r', encoding='utf-8') as f: + content = f.readlines() + + # 修改内容:将mtllib行指向原始材质库 + with open(new_obj_path, 'w', encoding='utf-8') as f: + for line in content: + if line.startswith('mtllib'): + f.write(f'mtllib {original_mtl_name}\n') + else: + f.write(line) + +def copy_textures_referenced_in_mtl(mtl_path, original_dir, new_dir): + """ + 复制MTL文件中引用的所有纹理图片。 + """ + try: + with open(mtl_path, 'r', encoding='utf-8') as f: 
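+            # Note (added): only map_Kd / map_Ks / map_Ka / map_bump / map_d statements are
+            # scanned below; if the source MTL files also use other texture keywords (for
+            # example `bump` or `map_Ke`), those would need to be added to the keyword list.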
+ for line in f: + line = line.strip() + # 查找纹理贴图定义(常见的贴图类型) + if any(line.startswith(keyword) for keyword in ['map_Kd', 'map_Ks', 'map_Ka', 'map_bump', 'map_d']): + parts = line.split() + if len(parts) >= 2: + tex_name = parts[1] + original_tex_path = os.path.join(original_dir, tex_name) + new_tex_path = os.path.join(new_dir, tex_name) + + if os.path.exists(original_tex_path): + shutil.copy2(original_tex_path, new_tex_path) + print(f"已复制纹理文件: {original_tex_path} -> {new_tex_path}") + except Exception as e: + print(f"处理材质文件 {mtl_path} 时出错: {e}") +#""" + +#""" +#""" + +def process_obj_files(original_obj_pid_dir,placed_remove_dir,obj_name): + """ + 处理OBJ文件及其相关资源文件的复制、更新和清理 + + 参数: + original_obj_pid_dir: 包含原始OBJ文件的目录 + placed_remove_dir: 用于存放移除文件的目录 + obj_name: 要处理的OBJ文件名 + """ + + base_origin_obj_path = os.path.join(original_obj_pid_dir,obj_name) + + # 从obj_name中提取PID(产品ID) + obj_pid = obj_name.split("_P")[0] + + # 查找相关的MTL和纹理文件 + mtl_name = None + tex_name = None + + for file_name in os.listdir(original_obj_pid_dir): + + if file_name.endswith(".mtl") and obj_pid in file_name: + mtl_name = file_name + + if (file_name.endswith(".jpg") or file_name.endswith(".png")) and obj_pid in file_name: + tex_name = file_name + + # 将原始OBJ文件移动到移除目录 + placed_remove_obj_path = os.path.join(placed_remove_dir, obj_name) + shutil.copy(base_origin_obj_path, placed_remove_obj_path) + os.remove(base_origin_obj_path) + + # 检查目录中是否还有其他OBJ文件 + exist_obj_any = False + exist_obj = False + + for file_name in os.listdir(original_obj_pid_dir): + if file_name.endswith(".obj"): + exist_obj_any = True + if obj_pid in file_name: + exist_obj = True + + # 确定是否需要删除MTL和纹理文件 + delete_mtl = not exist_obj_any or not exist_obj + + # 如果确定要删除,移动MTL和纹理文件 + if delete_mtl: + if mtl_name: + base_origin_mtl_path = os.path.join(original_obj_pid_dir, mtl_name) + placed_remove_mtl_path = os.path.join(placed_remove_dir, mtl_name) + shutil.copy(base_origin_mtl_path, placed_remove_mtl_path) + os.remove(base_origin_mtl_path) + + if tex_name: + base_origin_tex_path = os.path.join(original_obj_pid_dir, tex_name) + placed_remove_tex_path = os.path.join(placed_remove_dir, tex_name) + shutil.copy(base_origin_tex_path, placed_remove_tex_path) + os.remove(base_origin_tex_path) + +def update_obj_file(original_obj_path,compact_obj_path): + """""" + with open(original_obj_path, "r") as f: + lines_original = f.readlines() + mtllib_name = None + mat_name = None + for line in lines_original: + if line.startswith("mtllib"): + mtllib_name = line.split(" ")[1] + elif line.startswith("usemtl"): + mat_name = line.split(" ")[1] + if mtllib_name and mat_name: + break + with open(compact_obj_path, "r") as f: + lines = f.readlines() + new_lines = [] + for line2 in lines: + if line2.startswith("mtllib"): + line2 = f"mtllib {mtllib_name}\n" # 替换为原始 MTL 文件路径 + elif line2.startswith("usemtl"): + line2 = f"usemtl {mat_name}\n" # 替换为原始贴图路径 + new_lines.append(line2) + with open(compact_obj_path, "w") as f: + f.writelines(new_lines) + +def import_and_process_obj(input_obj_file, output_obj_file, xiong_zhang): + # 清除现有对象 + bpy.ops.object.select_all(action='SELECT') + bpy.ops.object.delete(use_global=False) + + # 导入OBJ文件 + bpy.ops.wm.obj_import(filepath=input_obj_file) + bpy.context.object.name = 'body' + + parent_dir = os.path.dirname(input_obj_file) + # 查找 parent_dir 目录中的 .jpg 文件,并设置为材质的贴图 + for file in os.listdir(parent_dir): + if file.endswith('.jpg'): + texture_path = os.path.join(parent_dir, file) + break + materials = bpy.data.materials + for material in materials: + # 
确保材质使用节点树 + if not material.use_nodes: + material.use_nodes = True + + node_tree = material.node_tree + texture_node = node_tree.nodes.new(type='ShaderNodeTexImage') + texture_node.image = bpy.data.images.load(texture_path) + + bsdf_node = node_tree.nodes.get('Principled BSDF') + if bsdf_node: + node_tree.links.new(bsdf_node.inputs['Base Color'], texture_node.outputs['Color']) + + bpy.ops.wm.obj_export(filepath=output_obj_file) + +def pass_for_min_dis(bounds_fix_out_dir,bounds_compact_out_dir, placed_models, dict_unplaced,dict_bounds_fix, dict_compact): + """ + for ply in os.listdir(bounds_fix_out_dir): + original_ply_path = os.path.join(bounds_fix_out_dir,ply) + dis_ply_path = os.path.join(bounds_compact_out_dir,ply) + shutil.copy(original_ply_path,dis_ply_path) + """ + for ply in dict_bounds_fix: + dict_compact[ply] = dict_bounds_fix[ply] + + pcd_all = [] + name_list = [] + model_list = [] + + for model in placed_models: + ply_origin_path = os.path.join(bounds_fix_out_dir,model['name']) + # pcd = o3d.io.read_point_cloud(ply_origin_path) + pcd = dict_bounds_fix[model['name']] + pcd_all.append(pcd) + name_list.append(model['name']) + model_list.append(model) + + for idx, pcd in enumerate(pcd_all): + y = model_list[idx]['position'][1] + dx = model_list[idx]['dimensions'][0] + dy = model_list[idx]['dimensions'][1] + # print("pass_for_min_dis", name_list[idx], y, dy) + + delta_y = 20 + # safe_y = y - delta_y + safe_y = y - dy - delta_y + min_y = 0 + if safe_y < min_y: + name = name_list[idx] + print("fail to place (x=0)", name_list[idx], y, dy) + dict_unplaced[name]=name + +def bake_textures(obj): + # 设置烘焙参数 + bpy.context.scene.render.engine = 'CYCLES' + bpy.context.scene.cycles.bake_type = 'DIFFUSE' + + # 创建贴图图像 + image = bpy.data.images.new("BakedTexture", 1024, 1024) + + # 执行烘焙 + bpy.ops.object.bake( + type='DIFFUSE', + pass_filter={'COLOR'}, + filepath=image.filepath + ) + + return image + +if __name__ == '__main__': + out_dir = "/data/datasets_20t/type_setting_test_data/" + weight_fix_out_obj_dir = f"{out_dir}/print_weight_fix_data_obj" + weight_fix_out_ply_dir = f"{out_dir}/data/datasets_20t/type_setting_test_data/print_weight_fix_data_ply" + print_factory_type_dir="/root/print_factory_type" + base_original_obj_dir="{print_factory_type_dir}/8/" + if not os.path.exists(weight_fix_out_ply_dir): + os.makedirs(weight_fix_out_ply_dir) + bounds_fix_out_dir = f"{out_dir}/print_bounds_fix_data" + bounds_compact_out_dir = f"{out_dir}/print_bounds_compact_data" + compact_obj_out_dir = f"{out_dir}//print_compact_obj" + if not os.path.exists(bounds_fix_out_dir): + os.mkdir(bounds_fix_out_dir) + if not os.path.exists(bounds_compact_out_dir): + os.makedirs(bounds_compact_out_dir) + if not os.path.exists(compact_obj_out_dir): + os.makedirs(compact_obj_out_dir) + + move_obj_to_compact_bounds(bounds_fix_out_dir,bounds_compact_out_dir,weight_fix_out_obj_dir,base_original_obj_dir,compact_obj_out_dir) \ No newline at end of file diff --git a/print_setting_run.py b/print_setting_run.py new file mode 100644 index 0000000..4b303ab --- /dev/null +++ b/print_setting_run.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +import pandas as pd +from threading import Thread +from PyQt5.QtWidgets import QApplication, QMainWindow,QMessageBox +from print_setting_ui import Ui_MainWindow +from PyQt5.QtGui import QFont +from PySide2.QtCore import Signal,QObject +from PyQt5.QtCore import QThread, pyqtSignal, QProcess +import sys +import os +import warnings +import time +import time +from PyQt5.QtWidgets import (QApplication, 
QWidget, QPushButton, QLabel, + QVBoxLayout, QFileDialog, QMessageBox) +from threading import Thread +from print_factory_type_setting_obj_run import print_type_setting_obj + +warnings.filterwarnings('ignore') +pd.set_option('display.width', None) + + +class MySignals(QObject): + text_print = Signal(str) + update_table = Signal(str) + + +class MyMainForm(QMainWindow, Ui_MainWindow): + def __init__(self,parent=None): + super(MyMainForm, self).__init__(parent) + self.setupUi(self) + self.folder_path = "" + self.cache_path = "" + self.pushButton.clicked.connect(self.on_select_folder) + self.pushButton_2.clicked.connect(self.on_run_clicked) + self.pushButton_3.clicked.connect(self.on_open_output_clicked) + self.global_ms = MySignals() + self.global_ms.text_print.connect(self.printToGui) + + def printToGui(self,text): + n_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + self.textBrowser.append(n_time+" "+str(text)) + + def on_select_folder(self): + folder = QFileDialog.getExistingDirectory(self, "选择文件夹") + if folder: + self.folder_path = folder + #self.folder_path_label.setText(f"📂 当前选择文件夹: {folder}") + #self.run_status_label.setText("") + self.cache_path = folder + "_arrange" + os.makedirs(self.cache_path, exist_ok=True) + + def on_run_clicked(self): + + def threadFunc1(): + if not self.folder_path: + #self.run_status_label.setText("❗请先选择一个文件夹再执行!") + return + + # if hasattr(self, 'worker') and self.worker.isRunning(): + # self.run_status_label.setText("⚠️ 正在执行中,请稍候...") + # return + #self.run_btn.setEnabled(False) + #self.run_status_label.setText("🚀 程序正在运行,请稍候...") + + print_type_setting_obj( + base_original_obj_dir=self.folder_path, + cache_type_setting_dir=self.cache_path, + show_chart=False + ) + + #self.run_status_label.setText("✅ 排版完成!") + + thread = Thread(target=threadFunc1) + thread.start() + + def open_file_cross_platform(self, path): + if not os.path.exists(path): + print("路径不存在!") + return + if sys.platform.startswith('win'): + os.startfile(path) + elif sys.platform.startswith('darwin'): + QProcess.startDetached("open", [path]) + else: + QProcess.startDetached("xdg-open", [path]) + + def on_open_output_clicked(self): + output_path = os.path.join(self.cache_path, "print_compact_obj") + if os.path.exists(output_path): + self.open_file_cross_platform(output_path) + else: + self.run_status_label.setText("⚠️ 输出文件夹不存在!") + + + def douyin_spider_go(self): + """下载抖音视频""" + def threadFunc1(): + print("开始下载") + self.load_chrome_video() + thread = Thread(target=threadFunc1) + thread.start() + #thread.run() + + def open_config_dir(self): + """打开文件夹""" + def threadFunc1(): + try: + start_directory = os.path.join(self.dir_base) + os.startfile(start_directory) + except RecursionError: + print("打开配置文件夹失败。") + thread = Thread(target=threadFunc1) + thread.start() + + + + + +if __name__ == '__main__': + #multiprocessing.freeze_support() + app = QApplication(sys.argv) + myWin = MyMainForm() + myWin.show() + sys.exit(app.exec_()) + + diff --git a/print_setting_ui.py b/print_setting_ui.py new file mode 100644 index 0000000..d3c139f --- /dev/null +++ b/print_setting_ui.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- + +# Form implementation generated from reading ui file 'dou_spider_ui.ui' +# +# Created by: PyQt5 UI code generator 5.15.4 +# +# WARNING: Any manual changes made to this file will be lost when pyuic5 is +# run again. Do not edit this file unless you know what you are doing. 
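+# Regeneration note (added; assumes the dou_spider_ui.ui file referenced above is still
+# available next to this module):
+#     pyuic5 dou_spider_ui.ui -o print_setting_ui.py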
+ + +from PyQt5 import QtCore, QtGui, QtWidgets + + +class Ui_MainWindow(object): + def setupUi(self, MainWindow): + MainWindow.setObjectName("MainWindow") + MainWindow.resize(300, 150) + self.centralwidget = QtWidgets.QWidget(MainWindow) + self.centralwidget.setObjectName("centralwidget") + self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget) + self.verticalLayout.setObjectName("verticalLayout") + #self.folder_path_label = QLabel("📁 请选择要排版的文件夹") + #self.folder_path_label.setWordWrap(True) + # 按钮布局 + self.pushButton = QtWidgets.QPushButton(self.centralwidget) + self.pushButton.setObjectName("pushButton") + self.verticalLayout.addWidget(self.pushButton) + + self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget) + self.pushButton_2.setObjectName("pushButton_2") + self.verticalLayout.addWidget(self.pushButton_2) + + self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget) + self.pushButton_3.setObjectName("pushButton_3") + self.verticalLayout.addWidget(self.pushButton_3) + + MainWindow.setCentralWidget(self.centralwidget) + self.statusbar = QtWidgets.QStatusBar(MainWindow) + self.statusbar.setObjectName("statusbar") + MainWindow.setStatusBar(self.statusbar) + + self.retranslateUi(MainWindow) + QtCore.QMetaObject.connectSlotsByName(MainWindow) + + def retranslateUi(self, MainWindow): + _translate = QtCore.QCoreApplication.translate + MainWindow.setWindowTitle(_translate("MainWindow", "自动排版工具")) + self.pushButton.setText(_translate("MainWindow", "选择文件夹")) + self.pushButton_2.setText(_translate("MainWindow", "开始自动排版")) + self.pushButton_3.setText(_translate("MainWindow", "打开排版好的文件夹")) + diff --git a/print_show_weight_max_obj.py b/print_show_weight_max_obj.py new file mode 100644 index 0000000..695094e --- /dev/null +++ b/print_show_weight_max_obj.py @@ -0,0 +1,1120 @@ +import os.path +import shutil + +import open3d as o3d +import numpy as np +import copy +from tqdm import tqdm +import time +import multiprocessing +import oss2 +from joblib import Parallel, delayed +import itertools +from numba import njit, prange +#import bpy +from plyfile import PlyData, PlyElement +from multiprocessing import Pool, RawArray +import ctypes +import itertools + +from get_lowest_position_of_center_ext import get_lowest_position_of_center_ext +from get_lowest_position_of_center_ext import get_lowest_position_of_center_ext2 +from get_lowest_position_of_center_ext import get_lowest_position_of_center_ext3 +from get_lowest_position_of_center_ext import get_lowest_position_of_center_net + +def make_pcd_plane(): + # 创建Y=0的平面点云 + width = 60.0 # 平面的宽度 + height = 60.0 # 平面的高度 + resolution = 50 # 点云的分辨率,越高越密集 + + # 生成平面点云 + x = np.linspace(-width / 2, width / 2, resolution) + y = np.linspace(-height / 2, height / 2, resolution) + xv, yv = np.meshgrid(x, y) + zv = np.zeros_like(xv) # Y 坐标恒为 0 + + # 将网格转换为点云的顶点数组 + points_plane = np.vstack((xv.flatten(), yv.flatten(), zv.flatten())).T + + # 创建Open3D点云对象 + pcd_plane = o3d.geometry.PointCloud() + pcd_plane.points = o3d.utility.Vector3dVector(points_plane) + pcd_plane.paint_uniform_color([0, 1, 0]) + return pcd_plane + + +def calculate_rotation_and_center_of_mass(angle_x, angle_y, angle_z, points): + """计算某一组旋转角度后的重心""" + # 计算绕X轴、Y轴和Z轴的旋转矩阵 + R_x = np.array([ + [1, 0, 0], + [0, np.cos(np.radians(angle_x)), -np.sin(np.radians(angle_x))], + [0, np.sin(np.radians(angle_x)), np.cos(np.radians(angle_x))] + ]) + + R_y = np.array([ + [np.cos(np.radians(angle_y)), 0, np.sin(np.radians(angle_y))], + [0, 1, 0], + [-np.sin(np.radians(angle_y)), 0, 
np.cos(np.radians(angle_y))] + ]) + + R_z = np.array([ + [np.cos(np.radians(angle_z)), -np.sin(np.radians(angle_z)), 0], + [np.sin(np.radians(angle_z)), np.cos(np.radians(angle_z)), 0], + [0, 0, 1] + ]) + + # 综合旋转矩阵 + R = R_z @ R_y @ R_x + + # 执行旋转 + rotated_points = points @ R.T + + # 计算最小z值 + min_z = np.min(rotated_points[:, 2]) + + # 计算平移向量,将最小Z值平移到0 + translation_vector = np.array([0, 0, -min_z]) + rotated_points += translation_vector + + # 计算重心 + center_of_mass = np.mean(rotated_points, axis=0) + + return center_of_mass[2], angle_x, angle_y, angle_z + + +# def parallel_rotation(points, angle_step=3): +# """并行计算最优旋转角度""" +# # 记录最优结果的初始化 +# min_center_of_mass_y = float('inf') +# best_angle_x, best_angle_y, best_angle_z = 0, 0, 0 +# +# # 计算每个角度的组合 +# angles_x = range(0, 360, angle_step) +# angles_y = range(0, 360, angle_step) +# angles_z = range(0, 360, angle_step) +# +# # 创建一个进程池并行处理 +# with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool: +# results = [] +# +# # 提交任务 +# for angle_x in angles_x: +# for angle_y in angles_y: +# for angle_z in angles_z: +# results.append( +# pool.apply_async(calculate_rotation_and_center_of_mass, (angle_x, angle_y, angle_z, points))) +# +# # 获取所有结果 +# for result in results: +# center_of_mass_z, angle_x, angle_y, angle_z = result.get() +# +# # 更新最优旋转角度 +# if center_of_mass_z < min_center_of_mass_y: +# min_center_of_mass_y = center_of_mass_z +# best_angle_x, best_angle_y, best_angle_z = angle_x, angle_y, angle_z +# +# return best_angle_x, best_angle_y, best_angle_z, min_center_of_mass_y +def parallel_rotation(points, angle_step=3): + """顺序计算最优旋转角度(单线程)""" + min_center_of_mass_y = float('inf') + best_angle_x, best_angle_y, best_angle_z = 0, 0, 0 + + # 遍历所有角度组合 + for angle_x in range(0, 360, angle_step): + for angle_y in range(0, 360, angle_step): + for angle_z in range(0, 360, angle_step): + center_of_mass_z, ax, ay, az = calculate_rotation_and_center_of_mass( + angle_x, angle_y, angle_z, points + ) + if center_of_mass_z < min_center_of_mass_y: + min_center_of_mass_y = center_of_mass_z + best_angle_x, best_angle_y, best_angle_z = ax, ay, az + + return best_angle_x, best_angle_y, best_angle_z, min_center_of_mass_y + +#""" +@njit +def safe_min(arr): + if arr.size == 0: + return np.inf + return np.min(arr) + +# 核心计算函数(支持Numba加速) +@njit(fastmath=True, cache=True) +def calculate_rotation_z(angle_x, angle_y, angle_z, points, cos_cache, sin_cache, angle_step): + #计算单个旋转组合后的重心Z坐标(无显式平移) + + if points.shape[0] == 0: + return np.inf # 返回极大值避免干扰最优解 + + # 获取预计算的三角函数值 + idx_x = angle_x // angle_step + idx_y = angle_y // angle_step + idx_z = angle_z // angle_step + + cos_x = cos_cache[idx_x] + sin_x = sin_cache[idx_x] + cos_y = cos_cache[idx_y] + sin_y = sin_cache[idx_y] + cos_z = cos_cache[idx_z] + sin_z = sin_cache[idx_z] + + # 构造旋转矩阵(展开矩阵乘法优化) + # R = Rz @ Ry @ Rx + # 计算矩阵元素(手动展开矩阵乘法) + m00 = cos_z * cos_y + m01 = cos_z * sin_y * sin_x - sin_z * cos_x + m02 = cos_z * sin_y * cos_x + sin_z * sin_x + + m10 = sin_z * cos_y + m11 = sin_z * sin_y * sin_x + cos_z * cos_x + m12 = sin_z * sin_y * cos_x - cos_z * sin_x + + m20 = -sin_y + m21 = cos_y * sin_x + m22 = cos_y * cos_x + + # 计算所有点的Z坐标 + z_values = np.empty(points.shape[0], dtype=np.float64) + for i in prange(points.shape[0]): + x, y, z = points[i, 0], points[i, 1], points[i, 2] + # 应用旋转矩阵 + rotated_z = m20 * x + m21 * y + m22 * z + z_values[i] = rotated_z + + # 计算重心Z(等效于平移后的重心) + # min_z = np.min(z_values) + min_z = safe_min(z_values) + avg_z = np.mean(z_values) + return avg_z - min_z # 等效于平移后的重心Z坐标 
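+
+# Added sanity-check sketch (not called anywhere in the pipeline): it shows why the fast path
+# above can return mean(z) - min(z) instead of explicitly dropping the rotated cloud onto the
+# plate first. `points` is assumed to be an (N, 3) float array and `R` any 3x3 rotation matrix.
+def _check_center_height_equivalence(points, R):
+    rotated = points @ R.T
+    # Explicit route: translate so the lowest point sits on z = 0, then take the centroid Z.
+    dropped_z = rotated[:, 2] - rotated[:, 2].min()
+    explicit = dropped_z.mean()
+    # Fast route used by calculate_rotation_z: skip the translation, just mean minus min.
+    fast = rotated[:, 2].mean() - rotated[:, 2].min()
+    assert abs(explicit - fast) < 1e-9
+    return fast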
+ +@njit(parallel=True, fastmath=True) +def _process_batch(batch, points, cos_cache, sin_cache, angle_step, results): + for i in prange(len(batch)): + ax, ay, az = batch[i] + results[i] = calculate_rotation_z( + ax, ay, az, points, + cos_cache, sin_cache, angle_step + ) + + +def parallel_rotation2(points, angle_step=3): + + #参数: + #points : numpy.ndarray (N,3) - 三维点云 + #angle_step : int - 角度搜索步长(度数) + + #返回: + #(best_angle_x, best_angle_y, best_angle_z, min_z) + + points_np = np.asarray(points) + points_float64 = points_np.astype(np.float64) + points = np.ascontiguousarray(points_float64) + + # points = np.ascontiguousarray(points.astype(np.float64)) + + # 生成所有可能角度 + angles = np.arange(0, 360, angle_step) + n_angles = len(angles) + + # 预计算三角函数值(大幅减少重复计算) + rads = np.radians(angles) + cos_cache = np.cos(rads).astype(np.float64) + sin_cache = np.sin(rads).astype(np.float64) + + # 生成所有角度组合(内存优化版) + total_combinations = n_angles ** 3 + # print(f"Total combinations: {total_combinations:,}") + + # 分块处理以避免内存溢出 + best_z = np.inf + best_angles = (0, 0, 0) + batch_size = 10 ** 6 # 根据可用内存调整 + + for x_chunk in range(0, n_angles, max(1, n_angles // 4)): + angles_x = angles[x_chunk:x_chunk + max(1, n_angles // 4)] + for y_chunk in range(0, n_angles, max(1, n_angles // 4)): + angles_y = angles[y_chunk:y_chunk + max(1, n_angles // 4)] + + # 生成当前分块的所有组合 + xx, yy, zz = np.meshgrid(angles_x, angles_y, angles) + current_batch = np.stack([xx.ravel(), yy.ravel(), zz.ravel()], axis=1) + + # 处理子批次 + for i in range(0, len(current_batch), batch_size): + batch = current_batch[i:i + batch_size] + results = np.zeros(len(batch), dtype=np.float64) + _process_batch(batch, points, cos_cache, sin_cache, angle_step, results) + + # 更新最佳结果 + min_idx = np.argmin(results) + if results[min_idx] < best_z: + best_z = results[min_idx] + best_angles = tuple(batch[min_idx]) + # print(f"New best: {best_angles} -> Z={best_z:.4f}") + + return (*best_angles, best_z) +#""" + +#""" +def rotate_x(angle): + theta = np.radians(angle) + return np.array([ + [1, 0, 0], + [0, np.cos(theta), -np.sin(theta)], + [0, np.sin(theta), np.cos(theta)] + ]) + +def rotate_y(angle): + theta = np.radians(angle) + return np.array([ + [np.cos(theta), 0, np.sin(theta)], + [0, 1, 0], + [-np.sin(theta), 0, np.cos(theta)] + ]) + +def rotate_z(angle): + theta = np.radians(angle) + return np.array([ + [np.cos(theta), -np.sin(theta), 0], + [np.sin(theta), np.cos(theta), 0], + [0, 0, 1] + ]) + +def compute_z_height_and_center(rotated_points): + z_min, z_max = rotated_points[:, 2].min(), rotated_points[:, 2].max() + y_min, y_max = rotated_points[:, 1].min(), rotated_points[:, 1].max() + return (z_max - z_min), (y_min + y_max) / 2 + +def init_worker(shared_array, shape, dtype): + global global_points + global_points = np.frombuffer(shared_array, dtype=dtype).reshape(shape) + +def compute_rotation(args): + angle_x, angle_y, angle_z = args + R = rotate_z(angle_z) @ rotate_y(angle_y) @ rotate_x(angle_x) + rotated_points = global_points @ R.T + z_height, center_y = compute_z_height_and_center(rotated_points) + return (angle_x, angle_y, angle_z, z_height, center_y) + +def parallel_rotation3(points, angle_step=5): + # 生成所有旋转角度组合 + angles = itertools.product( + np.arange(0, 360, angle_step), + np.arange(0, 360, angle_step), + np.arange(0, 360, angle_step) + ) + + # 共享内存初始化 + shape, dtype = points.shape, points.dtype + shared_array = RawArray(ctypes.c_double, points.size) + shared_points = np.frombuffer(shared_array, dtype=dtype).reshape(shape) + np.copyto(shared_points, points) 
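+    # Note (added): the RawArray-backed buffer is shared with the workers through the
+    # module-level `global_points` set up in init_worker, so the (N, 3) cloud is mapped once
+    # rather than pickled and resent for each of the (360/angle_step)**3 angle tasks.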
+ + # 多进程计算 + with Pool(initializer=init_worker, initargs=(shared_array, shape, dtype)) as pool: + results = pool.imap_unordered(compute_rotation, angles, chunksize=100) + + # 寻找最优解 + best_angle = (0, 0, 0) + min_z_height = float('inf') + min_center_y = 0 + for result in results: + if result[3] < min_z_height: + best_angle = result[:3] + min_z_height = result[3] + min_center_y = result[4] + + return (*best_angle, min_center_y) +#""" + +#""" +def parallel_rotation4(points, angle_step=4): + """仅绕 Y 轴旋转(假设 X/Z 轴不影响目标函数)""" + min_center = float('inf') + best_angle = 0 # 遍历所有角度组合 + """ + for angle_x in range(0, 360, angle_step): + for angle_y in range(0, 360, angle_step): + for angle_z in range(0, 360, angle_step): + center_of_mass_z, ax, ay, az = calculate_rotation_and_center_of_mass( + angle_x, angle_y, angle_z, points + ) + if center_of_mass_z < min_center_of_mass_y: + min_center_of_mass_y = center_of_mass_z + best_angle_x, best_angle_y, best_angle_z = ax, ay, az + + """ + #""" + for angle_x in range(-45, 45, angle_step): + for angle_y in range(0, 360, angle_step): + center_z, ax, ay, _ = calculate_rotation_and_center_of_mass(angle_x, angle_y, 0, points) + if center_z < min_center: + min_center = center_z + best_angle_x = ax + best_angle_y = ay + #""" + return (best_angle_x, best_angle_y, 0, min_center) + +#""" + +def read_mesh(obj_path, simple=True): + mesh_obj = o3d.io.read_triangle_mesh(obj_path, enable_post_processing=True) # + return mesh_obj + if not simple: + return mesh_obj + original_triangles = len(mesh_obj.triangles) + target_triangles = original_triangles if original_triangles <= 10000 else 10000 + if original_triangles > 10000: + mesh_obj = mesh_obj.simplify_quadric_decimation( + target_number_of_triangles=target_triangles, + maximum_error=0.0001, + boundary_weight=1.0 + ) + + return mesh_obj + +def compute_mesh_center(vertices): + """ + 计算网格质心 + + 参数: + vertices: 顶点坐标数组,形状为(N, 3)的NumPy数组或列表 + + 返回: + centroid: 质心坐标的NumPy数组 [x, y, z] + """ + if len(vertices) == 0: + raise ValueError("顶点数组不能为空") + + n = len(vertices) # 顶点数量 + # 初始化坐标累加器 + sum_x, sum_y, sum_z = 0.0, 0.0, 0.0 + + # 遍历所有顶点累加坐标值 + for vertex in vertices: + sum_x += vertex[0] + sum_y += vertex[1] + sum_z += vertex[2] + + # 计算各坐标轴的平均值 + centroid = np.array([sum_x / n, sum_y / n, sum_z / n]) + return centroid + +def down_sample(pcd, voxel_size, farthest_sample = False): + original_num = len(pcd.points) + target_samples = 1500 # 1000 + num_samples = min(target_samples, original_num) + + # 第一步:使用体素下采样快速减少点数量 + # voxel_size = 3 + if farthest_sample: + pcd_voxel = pcd.farthest_point_down_sample(num_samples=num_samples) + else: + pcd_voxel = pcd.voxel_down_sample(voxel_size) + down_num = len(pcd_voxel.points) + # print(f"original_num={original_num}, down_num={down_num}") + + # 第二步:仅在必要时进行最远点下采样 + if len(pcd_voxel.points) > target_samples and False: + pcd_downsampled = pcd_voxel.farthest_point_down_sample(num_samples=num_samples) + else: + pcd_downsampled = pcd_voxel + + return pcd_downsampled + +def get_lowest_position_of_center(obj_path,voxel_size,dict_origin,total_matrix): + + mesh_obj = read_mesh(obj_path) + dict_origin[obj_path] = mesh_obj + # dict_origin[obj_path] = copy.deepcopy(mesh_obj) + + #o3d.visualization.draw_geometries([mesh_obj]) + vertices = np.asarray(mesh_obj.vertices) + + # 确保网格有顶点 + if len(vertices) == 0: + # raise ValueError(f"Mesh has no vertices: {obj_path}") + print(f"Warning: Mesh has no vertices: {obj_path}") + return None + + pcd = o3d.geometry.PointCloud() + pcd.points = 
o3d.utility.Vector3dVector(vertices) + # o3d.visualization.draw_geometries([pcd]) + # pcd = o3d.io.read_point_cloud(ply_file_path) + + # print("voxel_size",voxel_size,obj_path, len(pcd.points), len(mesh_obj.vertices)) + + # 对点云进行下采样(体素网格法) + #""" + pcd_downsampled = down_sample(pcd, voxel_size) + pcd_downsampled.paint_uniform_color([0, 0, 1]) + + if len(np.asarray(pcd_downsampled.points)) <= 0: + bbox = pcd.get_axis_aligned_bounding_box() + volume = bbox.volume() + + print(f"len(pcd.points)={len(pcd.points)}, volume={volume}") + + # 处理体积为零的情况 + if volume <= 0: + # 计算点云的实际范围 + points = np.asarray(pcd.points) + if len(points) > 0: + min_bound = np.min(points, axis=0) + max_bound = np.max(points, axis=0) + extent = max_bound - min_bound + + # 确保最小维度至少为0.01 + min_dimension = max(0.01, np.min(extent)) + volume = min_dimension ** 3 + else: + volume = 1.0 # 最后的安全回退 + + print(f"Warning: Zero volume detected, using approximated volume {volume:.6f} for {obj_path}") + + # 安全计算密度 - 防止除零错误 + if len(pcd.points) > 0 and volume > 0: + original_density = len(pcd.points) / volume + voxel_size = max(0.01, min(10.0, 0.5 / (max(1e-6, original_density) ** 0.33))) + else: + # 当点数为0或体积为0时使用默认体素大小 + voxel_size = 1.0 # 默认值 + + print(f"Recalculated voxel_size: {voxel_size} for {obj_path}") + + pcd_downsampled = down_sample(pcd, voxel_size) + pcd_downsampled.paint_uniform_color([0, 0, 1]) + #""" + + original_num = len(pcd.points) + target_samples = 1000 + num_samples = min(target_samples, original_num) + + # 确保下采样后有点云 + if len(np.asarray(pcd_downsampled.points)) == 0: + # 使用原始点云作为后备 + pcd_downsampled = pcd + print(f"Warning: Using original point cloud for {obj_path} as downsampling produced no points") + + points = np.asarray(pcd_downsampled.points) + + # 初始化最小重心Y的值 + min_center_of_mass_y = float('inf') + best_angle_x, best_angle_y, best_angle_z = 0, 0, 0 + start_time = time.time() + # 旋转并计算最优角度:绕X、Y、Z轴进行每度的旋转 + # best_angle_x, best_angle_y, best_angle_z, min_center_of_mass_y = parallel_rotation(points, angle_step=3) + # best_angle_x, best_angle_y, best_angle_z, min_center_of_mass_y = parallel_rotation2(points, angle_step=3) + # best_angle_x, best_angle_y, best_angle_z, min_center_of_mass_y = parallel_rotation3(points, angle_step=3) + best_angle_x, best_angle_y, best_angle_z, min_center_of_mass_y = parallel_rotation4(points, angle_step=3) + + print("get_lowest_position_of_center", obj_path, best_angle_x,best_angle_y,best_angle_z,"time",time.time()-start_time) + + """ + # if best_angle_x >= 180: + if best_angle_x >= 155 and best_angle_x <= 325: + best_angle_x += 180 + if best_angle_y >= 180: + best_angle_y += 180 + if best_angle_z >= 180: + best_angle_z += 180 + #""" + + # 记录结束时间 + end_time = time.time() + elapsed_time = end_time - start_time + # print(f"Time taken: {elapsed_time:.2f} seconds") + # 输出最佳的旋转角度 + # print(f"Best Rotation Angles: angle_x = {best_angle_x}, angle_y = {best_angle_y}, angle_z = {best_angle_z}") + # print(f"Minimum Y Center of Mass: {min_center_of_mass_y}") + #time.sleep(1000) + + # 使用最佳角度进行旋转并平移obj + pcd_transformed = copy.deepcopy(mesh_obj) + #""" + center = pcd_transformed.get_center() + #arrow = o3d.geometry.TriangleMesh.create_arrow(0.05, 0.1) + #arrow.translate(center) + #o3d.visualization.draw_geometries([pcd_transformed, arrow]) + + # 最佳角度旋转 + R_x = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([1, 0, 0]) * np.radians(best_angle_x)) + pcd_transformed.rotate(R_x) + R_y = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * np.radians(best_angle_y)) + 
pcd_transformed.rotate(R_y) + R_z = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 0, 1]) * np.radians(best_angle_z)) + pcd_transformed.rotate(R_z) + + # centroid = pcd.get_center() + centroid = pcd_transformed.get_center() + # z_mean1 = centroid[2] + + T_x = np.eye(4) + T_x[:3, :3] = R_x + aabb = pcd_transformed.get_axis_aligned_bounding_box() + # center_point = aabb.get_center() + center_point = compute_mesh_center(mesh_obj.vertices) + T_center_to_origin = np.eye(4) + T_center_to_origin[:3, 3] = -center_point + T_origin_to_center = np.eye(4) + T_origin_to_center[:3, 3] = center_point + T_rot_center = T_origin_to_center @ T_x @ T_center_to_origin + total_matrix = T_rot_center @ total_matrix + + T_y = np.eye(4) + T_y[:3, :3] = R_y + aabb = pcd_transformed.get_axis_aligned_bounding_box() + # center_point = aabb.get_center() + center_point = compute_mesh_center(mesh_obj.vertices) + T_center_to_origin = np.eye(4) + T_center_to_origin[:3, 3] = -center_point + T_origin_to_center = np.eye(4) + T_origin_to_center[:3, 3] = center_point + T_rot_center = T_origin_to_center @ T_y @ T_center_to_origin + total_matrix = T_rot_center @ total_matrix + + T_z = np.eye(4) + T_z[:3, :3] = R_z + aabb = pcd_transformed.get_axis_aligned_bounding_box() + # center_point = aabb.get_center() + center_point = compute_mesh_center(mesh_obj.vertices) + T_center_to_origin = np.eye(4) + T_center_to_origin[:3, 3] = -center_point + T_origin_to_center = np.eye(4) + T_origin_to_center[:3, 3] = center_point + T_rot_center = T_origin_to_center @ T_z @ T_center_to_origin + + total_matrix = T_rot_center @ total_matrix + + #arrow = o3d.geometry.TriangleMesh.create_arrow(0.05, 0.1) + #arrow.translate(center) + #o3d.visualization.draw_geometries([pcd_transformed, arrow]) + #""" + + #""" + #试着旋转180,让脸朝上 + + # + vertices = np.asarray(pcd_transformed.vertices) + # 计算平移向量,将最小Y值平移到0 + min_z = np.min(vertices[:, 2]) + translation_vector = np.array([0,0,-min_z,]) + pcd_transformed.translate(translation_vector) + + T_trans1 = np.eye(4) + T_trans1[:3, 3] = translation_vector + total_matrix = T_trans1 @ total_matrix + # + + # 计算 z 坐标均值 + vertices = np.asarray(pcd_transformed.vertices) + z_mean1 = np.mean(vertices[:, 2]) + + angle_rad = np.pi + #print("旋转前质心:", pcd_transformed.get_center()) + #print("旋转前点示例:", np.asarray(pcd_transformed.vertices)[:3]) + R_y = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * angle_rad) + centroid = pcd_transformed.get_center() + pcd_transformed.translate(-center_point) + pcd_transformed.rotate(R_y, center=(0, 0, 0)) + pcd_transformed.translate(center_point) + + aabb = pcd_transformed.get_axis_aligned_bounding_box() + # center_point = aabb.get_center() + center_point = compute_mesh_center(mesh_obj.vertices) + # 构建绕中心点旋转的变换矩阵[3](@ref) + T_center_to_origin = np.eye(4) + T_center_to_origin[:3, 3] = -center_point + R_y180 = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * angle_rad) + T_rotate = np.eye(4) + T_rotate[:3, :3] = R_y180 + T_origin_to_center = np.eye(4) + T_origin_to_center[:3, 3] = center_point + T_rot_center = T_origin_to_center @ T_rotate @ T_center_to_origin + total_matrix = T_rot_center @ total_matrix + + #print("旋转后质心:", pcd_transformed.get_center()) + #print("旋转后点示例:", np.asarray(pcd_transformed.vertices)[:3]) + + # centroid = pcd.get_center() + centroid = pcd_transformed.get_center() + # z_mean2 = centroid[2] + + # + vertices = np.asarray(pcd_transformed.vertices) + # 计算平移向量,将最小Y值平移到0 + min_z = np.min(vertices[:, 2]) + max_z = 
np.max(vertices[:, 2]) + # print("min_z1", min_z, obj_path) + translation_vector = np.array([0,0,-min_z,]) + # translation_vector = np.array([0,0,-min_z + (min_z-max_z),]) + # print("translation_vector1",translation_vector) + pcd_transformed.translate(translation_vector) + + T_trans2 = np.eye(4) + T_trans2[:3, 3] = translation_vector + translation = total_matrix[:3, 3] + # print("translation_vector2",translation_vector) + # print(1,translation) + + total_matrix = T_trans2 @ total_matrix + translation = total_matrix[:3, 3] + # print(2,translation) + + # 计算 z 坐标均值 + vertices = np.asarray(pcd_transformed.vertices) + z_mean2 = np.mean(vertices[:, 2]) + + # print("z_mean",z_mean1,z_mean2,len(pcd_transformed.vertices),obj_path) + + if (z_mean2 > z_mean1): + R_y = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * -angle_rad) + centroid = pcd_transformed.get_center() + + aabb = pcd_transformed.get_axis_aligned_bounding_box() + # center_point = aabb.get_center() + center_point = compute_mesh_center(mesh_obj.vertices) + + pcd_transformed.translate(-center_point) + pcd_transformed.rotate(R_y, center=(0, 0, 0)) + pcd_transformed.translate(center_point) + + T_center_to_origin = np.eye(4) + T_center_to_origin[:3, 3] = -center_point + T_origin_to_center = np.eye(4) + T_origin_to_center[:3, 3] = center_point + # 构建反向旋转矩阵 + R_y = pcd_transformed.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * -angle_rad) + T_rotate_inv = np.eye(4) + T_rotate_inv[:3, :3] = R_y + # 完整的反向绕中心旋转矩阵 + T_rot_center_inv = T_origin_to_center @ T_rotate_inv @ T_center_to_origin + total_matrix = T_rot_center_inv @ total_matrix + #""" + + vertices = np.asarray(pcd_transformed.vertices) + # 计算平移向量,将最小Y值平移到0 + min_z = np.min(vertices[:, 2]) + # print("min_z2", min_z, obj_path) + translation_vector = np.array([0,0,-min_z,]) + pcd_transformed.translate(translation_vector) + + T_trans3 = np.eye(4) + T_trans3[:3, 3] = translation_vector + total_matrix = T_trans3 @ total_matrix + + translation = total_matrix[:3, 3] + # print(3,translation) + + return pcd_transformed, total_matrix + +def axis_angle_to_rotation_matrix(axis, angle): + """手动生成旋转矩阵""" + axis = axis / np.linalg.norm(axis) # 单位化 + cos_a = np.cos(angle) + sin_a = np.sin(angle) + return np.array([ + [cos_a + axis[0]**2*(1-cos_a), + axis[0]*axis[1]*(1-cos_a) - axis[2]*sin_a, + axis[0]*axis[2]*(1-cos_a) + axis[1]*sin_a], + [axis[1]*axis[0]*(1-cos_a) + axis[2]*sin_a, + cos_a + axis[1]**2*(1-cos_a), + axis[1]*axis[2]*(1-cos_a) - axis[0]*sin_a], + [axis[2]*axis[0]*(1-cos_a) - axis[1]*sin_a, + axis[2]*axis[1]*(1-cos_a) + axis[0]*sin_a, + cos_a + axis[2]**2*(1-cos_a)] + ]) + +def arrange_box_correctly(obj_transformed, voxel_size,total_matrix): + + vertices = np.asarray(obj_transformed.vertices) + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(vertices) + + # 降采样与特征计算 + pcd_downsampled = down_sample(pcd, voxel_size) + + original_num = len(pcd.points) + target_samples = 1000 + + points = np.asarray(pcd_downsampled.points) + cov = np.cov(points.T) + + center = obj_transformed.get_center() + + # 特征分解与方向约束(关键修改点) + eigen_vals, eigen_vecs = np.linalg.eigh(cov) + max_axis = eigen_vecs[:, np.argmax(eigen_vals)] + + # arrow = o3d.geometry.TriangleMesh.create_arrow(0.05, 0.1) + # arrow.translate(center) + # o3d.visualization.draw_geometries([obj_transformed, arrow]) + + # print("max_axis", max_axis) + # 强制主方向向量X分量为正(指向右侧) + if max_axis[0] < 0 or (max_axis[0] == 0 and max_axis[1] < 0): + max_axis = -max_axis + + target_dir = np.array([1, 0]) 
# 目标方向为X正轴 + current_dir = max_axis[:2] / np.linalg.norm(max_axis[:2]) + dot_product = np.dot(current_dir, target_dir) + + # print("dot_product", dot_product) + if dot_product < 0.8: # 阈值控制方向敏感性(建议0.6~0.9) + max_axis = -max_axis # 强制翻转方向 + + # 计算旋转角度 + angle_z = np.arctan2(max_axis[1], max_axis[0]) % (2 * np.pi) + + + if max_axis[0] <= 0 and max_axis[1] <= 0: + angle_z += np.pi + + # print("max_axis2", max_axis, angle_z / np.pi * 180 % 360) + + # angle_z = 0 + R = o3d.geometry.get_rotation_matrix_from_axis_angle([0, 0, -angle_z]) + + T = np.eye(4) + T[:3, :3] = R + T[:3, 3] = center - R.dot(center) # 保持中心不变 + obj_transformed.transform(T) + + total_matrix = T @ total_matrix + + #arrow = o3d.geometry.TriangleMesh.create_arrow(0.05, 0.1) + #arrow.translate(center) + #o3d.visualization.draw_geometries([obj_transformed, arrow]) + + return obj_transformed, total_matrix + +def get_new_bbox(obj_transformed_second,obj_name,weight_fix_out_dir,weight_fix_out_ply_dir,voxel_size,show_chart,dict_fix,compact_min_dis,total_matrix): + + # 计算点云的边界 + points = np.asarray(obj_transformed_second.vertices) + + min_bound = np.min(points, axis=0) # 获取点云的最小边界 + max_bound = np.max(points, axis=0) # 获取点云的最大边界 + + # 确保包围盒的Y坐标不低于0 + min_bound[2] = max(min_bound[2], 0) # 确保Y坐标的最小值不低于0 + + # 重新计算包围盒的中心和半长轴 + bbox_center = (min_bound + max_bound) / 2 # 计算包围盒的中心点 + bbox_extent = (max_bound - min_bound) # 计算包围盒的半长轴(尺寸) + + # 创建包围盒,确保尺寸正确 + new_bbox = o3d.geometry.OrientedBoundingBox(center=bbox_center, + R=np.eye(3), # 旋转矩阵,默认没有旋转 + extent=bbox_extent) + # 获取包围盒的长、宽和高 + x_length = round(bbox_extent[0],3) # X 方向的长 + y_length = round(bbox_extent[1],3) # Y 方向的宽 + z_length = round(bbox_extent[2],3) # Z 方向的高 + bbox_points = np.array([ + [min_bound[0], min_bound[1], min_bound[2]], + [max_bound[0], min_bound[1], min_bound[2]], + [max_bound[0], max_bound[1], min_bound[2]], + [min_bound[0], max_bound[1], min_bound[2]], + [min_bound[0], min_bound[1], max_bound[2]], + [max_bound[0], min_bound[1], max_bound[2]], + [max_bound[0], max_bound[1], max_bound[2]], + [min_bound[0], max_bound[1], max_bound[2]] + ]) + first_corner = bbox_points[2] + translation_vector = -first_corner + # start_time = time.time() + obj_transformed_second.translate(translation_vector) + + T_trans = np.eye(4) + T_trans[:3, 3] = translation_vector # 设置平移分量 [2,3](@ref) + total_matrix = T_trans @ total_matrix # 矩阵乘法顺序:最新变换左乘[4,5](@ref) + + new_bbox.translate(translation_vector) + # print("get_new_bbox1",time.time()-start_time) + + vertices = np.asarray(obj_transformed_second.vertices) + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(vertices) + + ply_print_pid = obj_name.replace(".obj","") + ply_name = f"{ply_print_pid}={z_length}+{y_length}+{x_length}.ply" + ply_out_path = os.path.join(weight_fix_out_ply_dir,ply_name) + # o3d.io.write_point_cloud(ply_out_path, pcd_downsampled) + # o3d.io.write_point_cloud(ply_out_path, pcd) + + if compact_min_dis: + original_num = len(pcd.points) + target_samples = 1500 # 1000 + num_samples = min(target_samples, original_num) + start_time = time.time() + pcd_downsampled = down_sample(pcd, voxel_size, False) + # print("down_sample time =",time.time()-start_time) + dict_fix[ply_name] = pcd_downsampled + else: + dict_fix[ply_name] = pcd + + # print("dict_fix write",ply_name) + # print("voxel_down_sample&&write_point_cloud",time.time()-start_time) + + if show_chart: + # 创建包围盒的轮廓(线框) + new_bbox_lines = o3d.geometry.LineSet.create_from_oriented_bounding_box(new_bbox) + new_bbox_lines.paint_uniform_color([1, 0, 
0]) # 红色 + + #创建坐标系 + coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2, origin=[0, 0, 0]) + + # 创建Y=0的平面点云 + pcd_plane = make_pcd_plane() + # 可视化点云和边界框 + o3d.visualization.draw_geometries([obj_transformed_second, coordinate_frame, pcd_plane, new_bbox_lines]) + + return obj_transformed_second, total_matrix + +def load_obj_data(get_pid,base_out_dir): + """下载obj文件""" + access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', 'LTAI5tBWbfkZntfJij4Fg9gz') + access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', 'sYDps1i9NSge6hn130EgpPJezKM6Gx') + bucket_name = os.getenv('OSS_TEST_BUCKET', 'suwa3d-securedata') + endpoint = os.getenv('OSS_TEST_ENDPOINT', 'https://oss-cn-shanghai.aliyuncs.com') + bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name) + # if not os.path.exists(base_out_dir): + # os.makedirs(base_out_dir) + prefix = f"objs/print/{get_pid}/" + out_pid_dir = os.path.join(base_out_dir,str(get_pid)) + for obj in oss2.ObjectIterator(bucket, prefix=prefix): + # 如果迭代器至少返回一个对象,那么表示该"文件夹"存在 + if not os.path.exists(out_pid_dir): + os.makedirs(out_pid_dir) + #print("Folder exists.") + filename = obj.key[len(prefix):] + if get_pid not in filename: + continue + print("获取的文件名称",filename) + local_path = os.path.join(out_pid_dir, filename) + # 下载对象到本地文件 + try: + bucket.get_object_to_file(obj.key, local_path) + print(f"Downloaded {obj.key} to {local_path}") + except: + print("下载错误",get_pid) + record_text = "./error_load.txt" + with open(record_text, 'a') as f: + f.write(get_pid + '\n') + + else: + # 如果迭代器没有返回任何对象,那么表示该"文件夹"不存在 + print("ossFolder does not exist.",get_pid) + +def custom_mesh_transform(vertices, transform_matrix): + """ + 手动实现网格变换:对每个顶点应用齐次变换矩阵 + 参数: + vertices: 网格顶点数组 (N, 3) + transform_matrix: 4x4 齐次变换矩阵 + 返回: + 变换后的顶点数组 (N, 3) + """ + # 1. 顶点转齐次坐标 (N, 3) → (N, 4) + homogeneous_vertices = np.hstack((vertices, np.ones((vertices.shape[0], 1)))) + + # 2. 应用变换矩阵:矩阵乘法 (4x4) * (4xN) → (4xN) + transformed_homogeneous = transform_matrix @ homogeneous_vertices.T + + # 3. 
转回非齐次坐标 (3xN) → (N, 3) + transformed_vertices = transformed_homogeneous[:3, :].T + return transformed_vertices + +def make_bbox_for_print(base_out_dir,weight_fix_out_dir,weight_fix_out_ply_dir,show_chart,dict_bad, dict_best_angel,dict_fix,dict_origin,dict_origin_real, compact_min_dis,dict_total_matrix): + """获取需要的盒子大小""" + # 加载点云数据 + start_time1 = time.time() + obj_id_list = [aa.split(".o")[0] for aa in os.listdir(base_out_dir) if aa.endswith(".obj")] + #print(obj_id_list) + #print(len(obj_id_list)) + #random.shuffle(obj_id_list) + obj_id_list = obj_id_list + #print(obj_id_list) + voxel_size = 3 # 设置体素的大小,决定下采样的密度 + + #for pid in tqdm(obj_id_list,desc="get new bbox"): + dict_mesh_obj = {} + for pid_t_y in obj_id_list: + start_time2 = time.time() + obj_name = pid_t_y+".obj" + obj_path = os.path.join(base_out_dir,obj_name) + + total_matrix = np.eye(4) + #放置最大接触面 + # obj_transformed, total_matrix = get_lowest_position_of_center(obj_path,voxel_size,dict_origin,total_matrix) + + mesh_obj = read_mesh(obj_path) + + # dict_origin_real[obj_path] = copy.deepcopy(mesh_obj) + dict_origin[obj_path] = copy.deepcopy(mesh_obj) + + start_time3 = time.time() + total_matrix, z_min= get_lowest_position_of_center_ext(obj_path, total_matrix) + # print("get_lowest_position_of_center_ext time", time.time()-start_time3) + # print(f"total_matrix={total_matrix}") + print(f"z_min={z_min}") + printId = "" + match = re.search(r"P(\d+)", obj_name) # 匹配 "P" 后的连续数字 + if match: + printId = match.group(1) + # print("printId", printId) + + # total_matrix, z_mean_min = get_lowest_position_of_center_net(printId, total_matrix) + + # print("total_matrix=", total_matrix) + + original_vertices = np.asarray(mesh_obj.vertices) + transformed_vertices = custom_mesh_transform(original_vertices, total_matrix) + mesh_obj.vertices = o3d.utility.Vector3dVector(transformed_vertices) + + # print("dict_origin[] obj_path=", obj_path) + # dict_origin[obj_path] = mesh_obj + + obj_transformed = copy.deepcopy(mesh_obj) + translation = total_matrix[:3, 3] + # print("make_bbox_for_print0", obj_name, translation) + + if obj_transformed is None: + dict_bad[obj_name]=obj_name + # print(len(dict_bad)) + # print(obj_name) + # 记录错误文件 + error_log = os.path.join(base_out_dir, "error_files.txt") + with open(error_log, 'a') as f: + f.write(f"{obj_path}\n") + print(f"Skipping invalid file: {obj_path}") + continue + + start_time3 = time.time() + best_angle_x, best_angle_y, best_angle_z, z_mean_min = get_lowest_position_of_center_ext3(mesh_obj, obj_path,voxel_size) + # print("get_lowest_position_of_center_ext2 time", time.time()-start_time3) + # print("best_angle=", best_angle_x, best_angle_y, best_angle_z, z_mean_min) + dict_best_angel[obj_name] = [int(round(best_angle_x)), int(round(best_angle_y)), int(round(best_angle_z))] + + start_time3 = time.time() + #将点云摆正和X轴平衡 + obj_transformed_second,total_matrix = arrange_box_correctly(obj_transformed,voxel_size,total_matrix) + # print("arrange_box_correctly time", time.time()-start_time3) + """ + # 创建可视化窗口 + vis = o3d.visualization.Visualizer() + vis.create_window(window_name='模型展示') + + # 添加所有模型到场景 + vis.add_geometry(obj_transformed_second) + + # 设置相机视角 + vis.get_render_option().mesh_show_back_face = True + vis.get_render_option().light_on = True + + # 运行可视化 + vis.run() + vis.destroy_window() + #""" + + #print("摆正后的obj") + #o3d.visualization.draw_geometries([obj_transformed_second, ]) + start_time3 = time.time() + mesh_obj,total_matrix = 
get_new_bbox(obj_transformed_second,obj_name,weight_fix_out_dir,weight_fix_out_ply_dir,voxel_size,show_chart,dict_fix,compact_min_dis,total_matrix) + dict_mesh_obj[obj_name] = mesh_obj + # print("get_new_bbox time", time.time()-start_time3) + + dict_total_matrix[obj_name] = total_matrix + print(f"make_bbox_for_print {obj_name} time={time.time()-start_time2}") + + print(f"make_bbox_for_print total_time={time.time()-start_time1}") + + return dict_mesh_obj +import re +def copy_obj_2x(base_obj_dir): + obj_list = [aa for aa in os.listdir(base_obj_dir) if aa.endswith(".obj")] + for obj_name in obj_list: + if "_F" in obj_name: + continue + obj_count = obj_name.split("x")[-1].replace(".obj","") + if not obj_count.isnumeric(): + match = re.search(r"x(\d+)", obj_name) + if match: + obj_count = match.group(1) + else: + print("未找到 x 后的数字") + + obj_count_num = int(obj_count) + if obj_count_num!=1: + for i in range(obj_count_num-1): + origin_path = os.path.join(base_obj_dir,obj_name) + dis_path = os.path.join(base_obj_dir,obj_name.replace(".obj",f"_F{i+1}.obj")) + if not os.path.exists(dis_path): + shutil.copy(origin_path,dis_path) + print(dis_path,"复制成功") + +def save_mesh_with_blender(obj_transformed, obj_path, apply_transform=True): + # o3d.io.write_triangle_mesh(obj_path, obj_transformed,write_triangle_uvs=True) + # return + """使用 Blender 导出变换后的网格""" + # 创建新网格对象 + mesh = bpy.data.meshes.new("TempMesh") + mesh.from_pydata( + np.asarray(obj_transformed.vertices), + [], + np.asarray(obj_transformed.triangles).tolist() + ) + obj = bpy.data.objects.new("TempObject", mesh) + + # 链接到场景 + bpy.context.collection.objects.link(obj) + + # 设置上下文 + original_selection = bpy.context.selected_objects.copy() + original_active = bpy.context.view_layer.objects.active + bpy.ops.object.select_all(action='DESELECT') + obj.select_set(True) + bpy.context.view_layer.objects.active = obj + + # 应用变换 + if apply_transform: + bpy.ops.object.transform_apply( + location=True, + rotation=True, + scale=True + ) + + # 配置导出参数 + export_settings = { + 'filepath': obj_path, + 'export_selected_objects': True, + 'export_triangulated_mesh': True, + 'forward_axis': 'Y', + 'up_axis': 'Z', + 'global_scale': 1.0 + } + + # 执行导出 + try: + bpy.ops.wm.obj_export(**export_settings) + finally: + # 清理临时对象 + bpy.data.objects.remove(obj, do_unlink=True) + bpy.data.meshes.remove(mesh) + + # 恢复原始上下文 + bpy.ops.object.select_all(action='DESELECT') + for o in original_selection: + o.select_set(True) + bpy.context.view_layer.objects.active = original_active + +if __name__ == '__main__': + out_dir = "/data/datasets_20t/type_setting_test_data/" + base_out_dir = f"{out_dir}/8/" + weight_fix_out_dir = f"{out_dir}/print_weight_fix_data_obj" + weight_fix_out_ply_dir = f"{out_dir}/print_weight_fix_data_ply" + copy_obj_2x(base_out_dir) \ No newline at end of file diff --git a/print_type_setting_gui.py b/print_type_setting_gui.py new file mode 100644 index 0000000..905058f --- /dev/null +++ b/print_type_setting_gui.py @@ -0,0 +1,346 @@ +import sys +import os +from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton, QLabel, + QVBoxLayout, QFileDialog, QMessageBox, + QRadioButton, QHBoxLayout, QButtonGroup, QGroupBox) +from PyQt5.QtCore import QProcess, QTimer +from threading import Thread +import print_factory_type_setting_obj_run + + +class AutoLayoutApp(QWidget): + def __init__(self): + super().__init__() + self.small_model_temp = False + self.setWindowTitle("自动排版工具") + self.setGeometry(200, 200, 600, 380) # 增加窗口高度以容纳新控件 + self.dw = 
print_factory_type_setting_obj_run + self.folder_path = "" + self.cache_path = "" + self.process = None + if self.small_model_temp: + self.selected_mode = "紧凑" # 默认排版模式 + self.output_format = "模型" # 默认输出格式 + self.selected_machine = "小机型" # 默认机型 + else: + self.selected_mode = "标准" # 默认排版模式 + self.output_format = "JSON" # 默认输出格式 + self.selected_machine = "大机型" # 默认机型 + self.is_running = False # 跟踪排版状态 + + self.init_ui() + + def init_ui(self): + layout = QVBoxLayout() + + # ===== 机型选择区域(移到最上面) ===== + machine_group = QGroupBox("机型选择") + machine_layout = QHBoxLayout() + + # 创建机型选择按钮组 + self.machine_group = QButtonGroup(self) + + # 创建两种机型选项 + self.large_machine = QRadioButton("大机型(600 * 500 * 300)") + self.small_machine = QRadioButton("小机型(380 * 345 * 250)") + + if self.small_model_temp: + self.large_machine.setEnabled(False) + self.small_machine.setChecked(True) # 默认选中 + else: + self.large_machine.setChecked(True) # 默认选中 + + # 添加到按钮组 + self.machine_group.addButton(self.large_machine, 1) + self.machine_group.addButton(self.small_machine, 2) + + # 连接信号 + self.machine_group.buttonToggled.connect(self.on_machine_changed) + + # 添加到布局 + machine_layout.addWidget(self.large_machine) + machine_layout.addWidget(self.small_machine) + machine_group.setLayout(machine_layout) + layout.addWidget(machine_group) + + # ===== 排版模式选择区域(改为QGroupBox) ===== + mode_group = QGroupBox("排版模式") + mode_layout = QHBoxLayout() + + # 创建单选按钮组 + self.mode_group = QButtonGroup(self) + + # 创建三种排版模式选项 + self.standard_mode = QRadioButton("标准模式(适合规整模型组)") + self.compact_mode = QRadioButton("紧凑模式(复杂度较高))") + self.advanced_mode = QRadioButton("高级模式(复杂度最高)") + self.advanced_mode.setVisible(False) + + if self.small_model_temp: + self.standard_mode.setEnabled(False) + self.compact_mode.setChecked(True) # 默认选中 + else: + self.standard_mode.setChecked(True) # 默认选中 + + # 添加到按钮组(确保互斥选择) + self.mode_group.addButton(self.standard_mode, 1) + self.mode_group.addButton(self.compact_mode, 2) + self.mode_group.addButton(self.advanced_mode, 3) + + # 连接信号 + self.mode_group.buttonToggled.connect(self.on_mode_changed) + + # 添加到布局 + mode_layout.addWidget(self.standard_mode) + mode_layout.addWidget(self.compact_mode) + mode_layout.addWidget(self.advanced_mode) + mode_group.setLayout(mode_layout) + layout.addWidget(mode_group) + + # ==== 输出格式选择区域 ==== + format_group = QGroupBox("输出格式") + format_layout = QHBoxLayout() + + # 创建输出格式按钮组 + self.format_group = QButtonGroup(self) + + # 创建两种输出格式选项 + self.json_format = QRadioButton("JSON格式") + self.model_format = QRadioButton("模型格式") + + if self.small_model_temp: + self.json_format.setEnabled(False) + self.model_format.setChecked(True) # 默认选中 + else: + self.json_format.setChecked(True) # 默认选中 + + # 添加到按钮组 + self.format_group.addButton(self.json_format, 1) + self.format_group.addButton(self.model_format, 2) + + # 连接信号 + self.format_group.buttonToggled.connect(self.on_format_changed) + + # 添加到布局 + format_layout.addWidget(self.json_format) + format_layout.addWidget(self.model_format) + format_group.setLayout(format_layout) + layout.addWidget(format_group) + + # ===== 原有UI元素 ===== + self.folder_path_label = QLabel(" 请选择要排版的文件夹") + self.folder_path_label.setWordWrap(True) + + self.run_status_label = QLabel("") + + layout.addWidget(self.folder_path_label) + layout.addWidget(self.run_status_label) + + self.select_folder_btn = QPushButton("选择文件夹") + self.select_folder_btn.clicked.connect(self.on_select_folder) + layout.addWidget(self.select_folder_btn) + + # 按钮布局(运行按钮和预览按钮在同一行) + buttons_layout = QHBoxLayout() + + 
self.run_btn = QPushButton("开始自动排版") + self.run_btn.clicked.connect(self.on_run_clicked) + buttons_layout.addWidget(self.run_btn) + + # ===== 新增预览按钮 ===== + self.preview_btn = QPushButton("预览排版结果") + self.preview_btn.clicked.connect(self.on_preview_clicked) + self.preview_btn.setEnabled(False) # 初始禁用,排版完成后才可用 + buttons_layout.addWidget(self.preview_btn) + + layout.addLayout(buttons_layout) + + self.open_output_btn = QPushButton("打开排版好的文件夹") + self.open_output_btn.clicked.connect(self.on_open_output_clicked) + layout.addWidget(self.open_output_btn) + + self.setLayout(layout) + + def on_mode_changed(self, button, checked): + """处理排版模式选择变化""" + if checked: + # self.selected_mode = button.text().replace("模式", "") + self.selected_mode = button.text()[:2] + self.run_status_label.setText(f"已选择: {self.selected_mode} 模式") + + def on_format_changed(self, button, checked): + """处理输出格式选择变化""" + if checked: + self.output_format = button.text().replace("格式", "") + self.run_status_label.setText(f"输出格式: {self.output_format}") + + def on_machine_changed(self, button, checked): + """处理机型选择变化""" + if checked: + self.selected_machine = button.text().split("(")[0] + self.run_status_label.setText(f"已选择: {self.selected_machine}") + + def on_select_folder(self): + folder = QFileDialog.getExistingDirectory(self, "选择文件夹") + if folder: + self.folder_path = folder + self.folder_path_label.setText(f" 当前选择文件夹: {folder}") + self.run_status_label.setText("") + self.cache_path = folder + "_arrange" + # self.cache_path = os.path.join(folder, "arrange") + + os.makedirs(self.cache_path, exist_ok=True) + self.preview_btn.setEnabled(False) # 选择新文件夹后禁用预览按钮 + + def get_base_directory(self): + """获取脚本或可执行文件的基础目录""" + if getattr(sys, 'frozen', False): + # 打包后的可执行文件环境 + base_path = os.path.dirname(sys.executable) + else: + # 正常脚本运行环境 + base_path = os.path.dirname(os.path.abspath(__file__)) + return base_path + + def on_run_clicked(self): + # 获取脚本所在目录的父目录 + script_dir = os.path.dirname(os.path.abspath(__file__)) + parent_dir = os.path.dirname(script_dir) + + # 获取基础目录 + base_path = self.get_base_directory() + # 获取父目录 + parent_dir = os.path.dirname(base_path) + bad_dir = os.path.join(parent_dir, "bad") + full_dir = os.path.join(parent_dir, "full") + + # 检查bad目录 + bad_dir_exists = os.path.exists(bad_dir) and os.path.isdir(bad_dir) + bad_dir_not_empty = bad_dir_exists and any(os.scandir(bad_dir)) + + # 检查full目录 + full_dir_exists = os.path.exists(full_dir) and os.path.isdir(full_dir) + full_dir_not_empty = full_dir_exists and any(os.scandir(full_dir)) + + # 如果有异常数据需要处理 + if bad_dir_not_empty or full_dir_not_empty: + message = "请处理以下目录中的异常数据:\n" + if bad_dir_not_empty: + message += f"- bad目录: {bad_dir}\n" + if full_dir_not_empty: + message += f"- full目录: {full_dir}\n" + + QMessageBox.warning(self, "存在未处理的异常数据", + message + "\n请先处理这些目录中的数据后再进行排版!") + self.run_status_label.setText("⚠️ 存在未处理的异常数据,请先处理!") + return + + def threadFunc1(): + self.is_running = True # 标记排版开始 + if not self.folder_path: + self.run_status_label.setText("❗请先选择一个文件夹再执行!") + self.is_running = False + return + + if self.process and self.process.state() == QProcess.Running: + self.run_status_label.setText("⚠️ 正在执行中,请稍候...") + self.is_running = False + return + + self.run_btn.setEnabled(False) + self.preview_btn.setEnabled(False) # 排版中禁用预览按钮 + self.run_status_label.setText( + f" 正在使用 [{self.selected_mode}] 排版, 机型: {self.selected_machine}, 输出格式: {self.output_format}, 请稍候..." 
+ ) + + normalized_path = os.path.normpath(self.folder_path) + self.batch_id = os.path.basename(normalized_path) + + # 调用排版函数,传递所有参数 + self.dw.print_type_setting_obj( + base_original_obj_dir=self.folder_path, + cache_type_setting_dir=self.cache_path, + show_chart=False, + batch_id=self.batch_id, + selected_mode=self.selected_mode, + output_format=self.output_format, + selected_machine=self.selected_machine + ) + + self.run_status_label.setText( + f"✅ [{self.selected_mode}] 排版完成! 机型: {self.selected_machine}, 输出格式: {self.output_format}" + ) + self.run_btn.setEnabled(True) + self.preview_btn.setEnabled(True) # 排版完成后启用预览按钮 + self.is_running = False # 标记排版结束 + + thread = Thread(target=threadFunc1) + thread.start() + + def on_process_finished(self, exitCode, exitStatus): + self.run_btn.setEnabled(True) + self.preview_btn.setEnabled(True) # 完成后启用预览按钮 + if exitCode == 0: + self.run_status_label.setText("✅ 排版完成!") + else: + self.run_status_label.setText(f"❌❌ 进程异常退出 (代码: {exitCode})") + + def on_process_error(self, error): + self.run_btn.setEnabled(True) + self.preview_btn.setEnabled(False) # 出错时禁用预览按钮 + self.run_status_label.setText(f"❌❌ 发生错误: {error.name}") + QMessageBox.critical(self, "错误", f"进程执行出错: {error.name}") + + def open_file_cross_platform(self, path): + if not os.path.exists(path): + self.run_status_label.setText("⚠️ 路径不存在!") + return + if sys.platform.startswith('win'): + os.startfile(path) + elif sys.platform.startswith('darwin'): + QProcess.startDetached("open", [path]) + else: + QProcess.startDetached("xdg-open", [path]) + + def on_preview_clicked(self): + """预览按钮点击事件处理""" + if not self.cache_path: + self.run_status_label.setText("⚠️ 请先执行排版操作!") + return + + is_small_machine = self.selected_machine=="小机型" + + if os.path.exists(self.folder_path): + self.run_status_label.setText("正在打开预览目录...") + self.dw.preview( + base_original_obj_dir=self.folder_path,batch_id=self.batch_id, is_small_machine=is_small_machine + ) + else: + self.run_status_label.setText("⚠️ 预览目录不存在!") + + def on_open_output_clicked(self): + """打开排版结果文件夹""" + if not self.cache_path: + self.run_status_label.setText("⚠️ 请先执行排版操作!") + return + + open_dir = self.cache_path + if self.output_format=="JSON": + open_dir = self.folder_path + + if os.path.exists(open_dir): + self.open_file_cross_platform(open_dir) + else: + self.run_status_label.setText("⚠️ 输出文件夹不存在!") + + +if __name__ == "__main__": + from PyQt5.QtCore import QSharedMemory + app = QApplication(sys.argv) + shared_mem = QSharedMemory("AutoLayoutTool_unique_key") + if not shared_mem.create(1): + QMessageBox.critical(None, "错误", "程序已经在运行中!") + sys.exit(1) + window = AutoLayoutApp() + window.show() + sys.exit(app.exec_()) \ No newline at end of file diff --git a/print_type_setting_gui.spec b/print_type_setting_gui.spec new file mode 100644 index 0000000..958559e --- /dev/null +++ b/print_type_setting_gui.spec @@ -0,0 +1,38 @@ +# -*- mode: python ; coding: utf-8 -*- + + +a = Analysis( + ['print_type_setting_gui.py'], + pathex=[], + binaries=[], + datas=[], + hiddenimports=[], + hookspath=[], + hooksconfig={}, + runtime_hooks=[], + excludes=[], + noarchive=False, + optimize=0, +) +pyz = PYZ(a.pure) + +exe = EXE( + pyz, + a.scripts, + a.binaries, + a.datas, + [], + name='print_type_setting_gui', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=True, + upx_exclude=[], + runtime_tmpdir=None, + console=True, + disable_windowed_traceback=False, + argv_emulation=False, + target_arch=None, + codesign_identity=None, + entitlements_file=None, +) diff --git 
a/print_type_setting_gui_multi.py b/print_type_setting_gui_multi.py new file mode 100644 index 0000000..abeb5b3 --- /dev/null +++ b/print_type_setting_gui_multi.py @@ -0,0 +1,126 @@ +import sys +import os +from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton, QLabel, + QVBoxLayout, QFileDialog, QMessageBox) +from PyQt5.QtCore import QProcess, QTimer +from threading import Thread +import print_factory_type_setting_obj_run + + +class AutoLayoutApp(QWidget): + def __init__(self): + super().__init__() + self.setWindowTitle("自动排版工具") + self.setGeometry(200, 200, 600, 200) + self.dw= print_factory_type_setting_obj_run + self.folder_path = "" + self.cache_path = "" + self.process = None + + self.init_ui() + + def init_ui(self): + layout = QVBoxLayout() + + self.folder_path_label = QLabel("�� 请选择要排版的文件夹") + self.folder_path_label.setWordWrap(True) + + self.run_status_label = QLabel("") + + layout.addWidget(self.folder_path_label) + layout.addWidget(self.run_status_label) + + self.select_folder_btn = QPushButton("选择文件夹") + self.select_folder_btn.clicked.connect(self.on_select_folder) + layout.addWidget(self.select_folder_btn) + + self.run_btn = QPushButton("开始自动排版") + self.run_btn.clicked.connect(self.on_run_clicked) + layout.addWidget(self.run_btn) + + self.open_output_btn = QPushButton("打开排版好的文件夹") + self.open_output_btn.clicked.connect(self.on_open_output_clicked) + layout.addWidget(self.open_output_btn) + + self.setLayout(layout) + + def on_select_folder(self): + folder = QFileDialog.getExistingDirectory(self, "选择文件夹") + if folder: + self.folder_path = folder + self.folder_path_label.setText(f"�� 当前选择文件夹: {folder}") + self.run_status_label.setText("") + self.cache_path = folder + "_arrange" + os.makedirs(self.cache_path, exist_ok=True) + + def on_run_clicked(self): + def threadFunc1(): + if not self.folder_path: + self.run_status_label.setText("❗请先选择一个文件夹再执行!") + return + + if self.process and self.process.state() == QProcess.Running: + self.run_status_label.setText("⚠️ 正在执行中,请稍候...") + return + + self.run_btn.setEnabled(False) + self.run_status_label.setText("�� 程序正在运行,请稍候...") + + return_code = self.dw.print_type_setting_obj(base_original_obj_dir=self.folder_path,cache_type_setting_dir=self.cache_path, + show_chart=False) + if return_code==0: + self.run_status_label.setText("✅ 排版完成!") + elif return_code==-1: + self.run_status_label.setText("❌选择目录为空!") + else: + self.run_status_label.setText("❌排版失败!") + self.run_btn.setEnabled(True) + + thread = Thread(target=threadFunc1) + thread.start() + + def on_process_finished(self, exitCode, exitStatus): + self.run_btn.setEnabled(True) + if exitCode == 0: + self.run_status_label.setText("✅ 排版完成!") + else: + self.run_status_label.setText(f"❌ 进程异常退出 (代码: {exitCode})") + + def on_process_error(self, error): + self.run_btn.setEnabled(True) + self.run_status_label.setText(f"❌ 发生错误: {error.name}") + QMessageBox.critical(self, "错误", f"进程执行出错: {error.name}") + + def open_file_cross_platform(self, path): + if not os.path.exists(path): + self.run_status_label.setText("⚠️ 路径不存在!") + return + if sys.platform.startswith('win'): + os.startfile(path) + elif sys.platform.startswith('darwin'): + QProcess.startDetached("open", [path]) + else: + QProcess.startDetached("xdg-open", [path]) + + def on_open_output_clicked(self): + if not self.cache_path: + self.run_status_label.setText("⚠️ 请先执行排版操作!") + return + + output_path = os.path.join(self.cache_path, "print_compact_obj") + if os.path.exists(output_path): + self.open_file_cross_platform(output_path) + else: + 
self.run_status_label.setText("⚠️ 输出文件夹不存在!") + + +if __name__ == "__main__": + from PyQt5.QtCore import QSharedMemory + app = QApplication(sys.argv) + shared_mem = QSharedMemory("AutoLayoutTool_unique_key") + if not shared_mem.create(1): + QMessageBox.critical(None, "错误", "程序已经在运行中!") + sys.exit(1) + window = AutoLayoutApp() + window.show() + sys.exit(app.exec_()) \ No newline at end of file diff --git a/qt5_demo.py b/qt5_demo.py new file mode 100644 index 0000000..30ec0e6 --- /dev/null +++ b/qt5_demo.py @@ -0,0 +1,25 @@ +import sys +from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QMessageBox + +class MyWindow(QWidget): + def __init__(self): + super().__init__() + + self.setWindowTitle("PyQt5 简单示例") + self.setGeometry(100, 100, 300, 200) + + self.button = QPushButton("点我一下", self) + self.button.setGeometry(100, 80, 100, 30) + self.button.clicked.connect(self.show_message) + + def show_message(self): + QMessageBox.information(self, "提示", "按钮被点击了!") + +if __name__ == "__main__": + app = QApplication(sys.argv) + window = MyWindow() + window.show() + sys.exit(app.exec_()) + """ + pyinstaller qt5_demo.py --hidden-import PySide2.QtXml + """ diff --git a/qt5_demo.spec b/qt5_demo.spec new file mode 100644 index 0000000..85043d8 --- /dev/null +++ b/qt5_demo.spec @@ -0,0 +1,44 @@ +# -*- mode: python ; coding: utf-8 -*- + + +a = Analysis( + ['qt5_demo.py'], + pathex=[], + binaries=[], + datas=[], + hiddenimports=['PySide2.QtXml'], + hookspath=[], + hooksconfig={}, + runtime_hooks=[], + excludes=[], + noarchive=False, + optimize=0, +) +pyz = PYZ(a.pure) + +exe = EXE( + pyz, + a.scripts, + [], + exclude_binaries=True, + name='qt5_demo', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=True, + console=True, + disable_windowed_traceback=False, + argv_emulation=False, + target_arch=None, + codesign_identity=None, + entitlements_file=None, +) +coll = COLLECT( + exe, + a.binaries, + a.datas, + strip=False, + upx=True, + upx_exclude=[], + name='qt5_demo', +) diff --git a/sui_01.py b/sui_01.py new file mode 100644 index 0000000..75c0268 --- /dev/null +++ b/sui_01.py @@ -0,0 +1,275 @@ +import open3d as o3d +import os +import numpy as np +from scipy.spatial.transform import Rotation +import sys + +import argparse +# import cv2 +import matplotlib.pyplot as plt +import numpy as np +from numba import njit, prange +import time + + +# 核心计算函数(支持Numba加速) +@njit(fastmath=True, cache=True) +def calculate_rotation_z(angle_x, angle_y, angle_z, points, cos_cache, sin_cache, angle_step): + """计算单个旋转组合后的重心Z坐标(无显式平移)""" + # 获取预计算的三角函数值 + idx_x = angle_x // angle_step + idx_y = angle_y // angle_step + idx_z = angle_z // angle_step + + cos_x = cos_cache[idx_x] + sin_x = sin_cache[idx_x] + cos_y = cos_cache[idx_y] + sin_y = sin_cache[idx_y] + cos_z = cos_cache[idx_z] + sin_z = sin_cache[idx_z] + + # 构造旋转矩阵(展开矩阵乘法优化) + # R = Rz @ Ry @ Rx + # 计算矩阵元素(手动展开矩阵乘法) + m00 = cos_z * cos_y + m01 = cos_z * sin_y * sin_x - sin_z * cos_x + m02 = cos_z * sin_y * cos_x + sin_z * sin_x + + m10 = sin_z * cos_y + m11 = sin_z * sin_y * sin_x + cos_z * cos_x + m12 = sin_z * sin_y * cos_x - cos_z * sin_x + + m20 = -sin_y + m21 = cos_y * sin_x + m22 = cos_y * cos_x + + # 计算所有点的Z坐标 + z_values = np.empty(points.shape[0], dtype=np.float64) + for i in prange(points.shape[0]): + x, y, z = points[i, 0], points[i, 1], points[i, 2] + # 应用旋转矩阵 + rotated_z = m20 * x + m21 * y + m22 * z + z_values[i] = rotated_z + + # 计算重心Z(等效于平移后的重心) + min_z = np.min(z_values) + avg_z = np.mean(z_values) + return avg_z - min_z # 
等效于平移后的重心Z坐标 + + +# 并行优化主函数 +def parallel_rotation2(points, angle_step=3): + """ + 参数: + points : numpy.ndarray (N,3) - 三维点云 + angle_step : int - 角度搜索步长(度数) + + 返回: + (best_angle_x, best_angle_y, best_angle_z, min_z) + """ + points = np.ascontiguousarray(points.astype(np.float64)) + + # 生成所有可能角度 + angles = np.arange(0, 360, angle_step) + n_angles = len(angles) + + # 预计算三角函数值(大幅减少重复计算) + rads = np.radians(angles) + cos_cache = np.cos(rads).astype(np.float64) + sin_cache = np.sin(rads).astype(np.float64) + + # 生成所有角度组合(内存优化版) + total_combinations = n_angles ** 3 + print(f"Total combinations: {total_combinations:,}") + + # 分块处理以避免内存溢出 + best_z = np.inf + best_angles = (0, 0, 0) + batch_size = 10 ** 6 # 根据可用内存调整 + + for x_chunk in range(0, n_angles, max(1, n_angles // 4)): + angles_x = angles[x_chunk:x_chunk + max(1, n_angles // 4)] + for y_chunk in range(0, n_angles, max(1, n_angles // 4)): + angles_y = angles[y_chunk:y_chunk + max(1, n_angles // 4)] + + # 生成当前分块的所有组合 + xx, yy, zz = np.meshgrid(angles_x, angles_y, angles) + current_batch = np.stack([xx.ravel(), yy.ravel(), zz.ravel()], axis=1) + + # 处理子批次 + for i in range(0, len(current_batch), batch_size): + batch = current_batch[i:i + batch_size] + results = np.zeros(len(batch), dtype=np.float64) + _process_batch(batch, points, cos_cache, sin_cache, angle_step, results) + + # 更新最佳结果 + min_idx = np.argmin(results) + if results[min_idx] < best_z: + best_z = results[min_idx] + best_angles = tuple(batch[min_idx]) + print(f"New best: {best_angles} -> Z={best_z:.4f}") + + return (*best_angles, best_z) + + +@njit(parallel=True, fastmath=True) +def _process_batch(batch, points, cos_cache, sin_cache, angle_step, results): + for i in prange(len(batch)): + ax, ay, az = batch[i] + results[i] = calculate_rotation_z( + ax, ay, az, points, + cos_cache, sin_cache, angle_step + ) + + +class ModelProcessor: + def __init__(self): + + # argv = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else [] + parser = argparse.ArgumentParser() + + parser.add_argument( + "--id", + required=False, + ) + + args = parser.parse_args() + + self.id = args.id + + self.mesh = None + self.asset_dir = f"/home/algo/Documents/datasets/{self.id}" + + def load_model(self): + """加载并初始化3D模型""" + # model_path = f"{self.asset_dir}/baked/{self.id}.obj" + # model_path = f"{self.asset_dir}/repair_{self.id}_mesh.ply" + model_path = "/data/datasets_20t/8/88884_253283_P65951_6cm_x1.obj" + if not os.path.exists(model_path): + raise FileNotFoundError(f"Model file not found: {model_path}") + + print(model_path) + + mesh_native = o3d.io.read_triangle_mesh(model_path, enable_post_processing=False) + # self.mesh = o3d.io.read_triangle_mesh(model_path, enable_post_processing=False) + + print("Open3D去重前顶点数:", len(mesh_native.vertices)) + self.mesh = mesh_native.merge_close_vertices(eps=1e-6) + + vertices2 = np.asarray(self.mesh.vertices) + print("Open3D去重后顶点数:", len(vertices2)) + vertices2_sorted = sorted( + vertices2.tolist(), + key=lambda x: (x[0], x[1], x[2]) + ) + + if not self.mesh.has_vertex_colors(): + num_vertices = len(self.mesh.vertices) + self.mesh.vertex_colors = o3d.utility.Vector3dVector( + np.ones((num_vertices, 3)) + ) + + self.uv_array = np.asarray(self.mesh.triangle_uvs) + # print(f"UV 坐标形状:{self.uv_array.shape}, {self.uv_array[0][1]}") + + def calculate_rotation_and_center_of_mass(self, angle_x, angle_y, angle_z, points): + """计算某一组旋转角度后的重心""" + # 计算绕X轴、Y轴和Z轴的旋转矩阵 + R_x = np.array([ + [1, 0, 0], + [0, np.cos(np.radians(angle_x)), -np.sin(np.radians(angle_x))], + [0, 
np.sin(np.radians(angle_x)), np.cos(np.radians(angle_x))] + ]) + + R_y = np.array([ + [np.cos(np.radians(angle_y)), 0, np.sin(np.radians(angle_y))], + [0, 1, 0], + [-np.sin(np.radians(angle_y)), 0, np.cos(np.radians(angle_y))] + ]) + + R_z = np.array([ + [np.cos(np.radians(angle_z)), -np.sin(np.radians(angle_z)), 0], + [np.sin(np.radians(angle_z)), np.cos(np.radians(angle_z)), 0], + [0, 0, 1] + ]) + + # 综合旋转矩阵 + R = R_z @ R_y @ R_x + + # 执行旋转 + rotated_points = points @ R.T + + # 计算最小z值 + min_z = np.min(rotated_points[:, 2]) + + # 计算平移向量,将最小Z值平移到0 + translation_vector = np.array([0, 0, -min_z]) + rotated_points += translation_vector + + # 计算重心 + center_of_mass = np.mean(rotated_points, axis=0) + + return center_of_mass[2], angle_x, angle_y, angle_z + + def parallel_rotation(self, angle_step=3): + """顺序计算最优旋转角度(单线程)""" + min_center_of_mass_y = float('inf') + best_angle_x, best_angle_y, best_angle_z = 0, 0, 0 + + # 遍历所有角度组合 + for angle_x in range(0, 360, angle_step): + for angle_y in range(0, 360, angle_step): + for angle_z in range(0, 360, angle_step): + center_of_mass_z, ax, ay, az = self.calculate_rotation_and_center_of_mass( + angle_x, angle_y, angle_z, self.mesh.vertices + ) + if center_of_mass_z < min_center_of_mass_y: + min_center_of_mass_y = center_of_mass_z + best_angle_x, best_angle_y, best_angle_z = ax, ay, az + + return best_angle_x, best_angle_y, best_angle_z, min_center_of_mass_y + + def process(self): + """执行完整处理流程""" + self.load_model() + + try: + start = time.time() + + # mesh = o3d.geometry.TriangleMesh() + # mesh.vertices = o3d.utility.Vector3dVector(np.random.rand(100, 3)) + # points = np.asarray(mesh.vertices) + + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(self.mesh.vertices) + + # 自动计算合理体素大小 + raw_points = np.asarray(pcd.points) + bounds = np.ptp(raw_points, axis=0) + voxel_size = np.max(bounds) / 50 # 默认取最大边长的2% + + # 执行下采样并验证 + pcd_downsampled = pcd.voxel_down_sample(voxel_size) + if len(pcd_downsampled.points) < 10: # 最少保留10个点 + raise RuntimeError(f"下采样失败:voxel_size={voxel_size:.3f}过大") + + print(f"下采样后点数: {len(pcd_downsampled.points)} (voxel_size={voxel_size:.3f})") + + # pcd.paint_uniform_color([1,0,0]) # 原始红色 + # pcd_downsampled.paint_uniform_color([0,0,1]) # 采样后蓝色 + # o3d.visualization.draw_geometries([pcd, pcd_downsampled]) + + # 继续后续处理 + points = np.asarray(pcd_downsampled.points) + + best_angle_x, best_angle_y, best_angle_z, min_z = parallel_rotation2(points, angle_step=5) + print("best=", best_angle_x, best_angle_y, best_angle_z, min_z) + print(time.time() - start) + + except Exception as e: + print(f"Error during processing: {str(e)}") + raise + + +if __name__ == "__main__": + ModelProcessor().process() \ No newline at end of file diff --git a/test.py b/test.py new file mode 100644 index 0000000..06a520a --- /dev/null +++ b/test.py @@ -0,0 +1,19 @@ +import requests +import json + +printId = 84258 +url = f"https://mp.api.suwa3d.com/api/printOrder/infoByPrintId?printId={printId}" +res = requests.get(url) +print(res) + +datas = res.json()["data"]["layout"] +print(datas) +angle_x = datas.get("angle_x",0) +angle_y = datas.get("angle_y",0) +angle_z = datas.get("angle_z",0) +layout_z = datas.get("layout_z",0) +print("angle_x",angle_x) +print("angle_y",angle_y) +print("angle_z",angle_z) +print("layout_z",layout_z) +# TODO 解析 res diff --git a/test_load_json.py b/test_load_json.py new file mode 100644 index 0000000..13c3c78 --- /dev/null +++ b/test_load_json.py @@ -0,0 +1,482 @@ +import open3d as o3d +import numpy as np +import json 
+import os +from PIL import Image +import argparse + +def custom_mesh_transform(vertices, transform_matrix): + """ + 手动实现网格变换:对每个顶点应用齐次变换矩阵 + 参数: + vertices: 网格顶点数组 (N, 3) + transform_matrix: 4x4 齐次变换矩阵 + 返回: + 变换后的顶点数组 (N, 3) + """ + # 1. 顶点转齐次坐标 (N, 3) → (N, 4) + homogeneous_vertices = np.hstack((vertices, np.ones((vertices.shape[0], 1)))) + + # 2. 应用变换矩阵:矩阵乘法 (4x4) * (4xN) → (4xN) + transformed_homogeneous = transform_matrix @ homogeneous_vertices.T + + # 3. 转回非齐次坐标 (3xN) → (N, 3) + transformed_vertices = transformed_homogeneous[:3, :].T + return transformed_vertices + +def load_and_transform_models(base_path, dict_origin, blank_path, json_name): + + meshes = [] # 存储所有变换后的网格 + + json_path = os.path.join(base_path, json_name) + + """ + 加载JSON文件,读取所有模型信息,应用变换后返回模型列表 + """ + # 检查JSON文件是否存在 + if not os.path.exists(json_path): + print(f"错误: JSON文件不存在 - {json_path}") + return [] + + # 读取JSON文件 + try: + with open(json_path, 'r') as f: + data = json.load(f) + except Exception as e: + print(f"读取JSON文件失败: {e}") + return [] + + # 处理每个模型 + for model in data.get('models', []): + obj_name = model.get('file_name', '') + obj_path = os.path.join(base_path, obj_name) + + # 检查文件路径是否存在 + if not obj_path: + print("警告: 跳过缺少文件路径的模型") + continue + + # print("load path", obj_path) + # 检查文件是否存在 + if not os.path.exists(obj_path): + print(f"警告: 模型文件不存在 - {obj_path}") + continue + + # 加载网格 + try: + # print("dict_origin obj_path=", obj_path) + key = obj_path + if key in dict_origin: + # print("key in dict_origin") + mesh = dict_origin[key] + else : + # print("key not in dict_origin") + mesh = o3d.io.read_triangle_mesh(obj_path, enable_post_processing=True) + if not mesh.has_vertices(): + print(f"警告: 网格无有效顶点 - {obj_path}") + continue + except Exception as e: + print(f"加载模型失败: {obj_path} - {e}") + continue + + transform = model.get('transform', {}) + + homo_matrix = transform["homo_matrix"] # 获取存储的列表 + reconstructed_matrix = np.array(homo_matrix, dtype=np.float64) + + # 手动变换顶点 + original_vertices = np.asarray(mesh.vertices) + transformed_vertices = custom_mesh_transform(original_vertices, reconstructed_matrix) + mesh.vertices = o3d.utility.Vector3dVector(transformed_vertices) + + # 添加到列表 + meshes.append(mesh) + print(f"已加载并变换: {os.path.basename(obj_path)}") + + add_plank = True + if add_plank: + # obj_path = os.path.join(blank_dir, "blank2.obj") + obj_path = blank_path + print("add_plank",obj_path) + + try: + mesh = o3d.io.read_triangle_mesh(obj_path, enable_post_processing=True) + if not mesh.has_vertices(): + print(f"警告: 网格无有效顶点 - {obj_path}") + except Exception as e: + print(f"加载模型失败: {obj_path} - {e}") + + rotation = [0.0, 0.0, 0.0] + mesh_center = compute_mesh_center(mesh.vertices) + + R_x = mesh.get_rotation_matrix_from_axis_angle(np.array([1, 0, 0]) * np.radians(rotation[0])) + mesh.rotate(R_x,center=mesh_center) + R_y = mesh.get_rotation_matrix_from_axis_angle(np.array([0, 1, 0]) * np.radians(rotation[1])) + mesh.rotate(R_y,center=mesh_center) + R_z = mesh.get_rotation_matrix_from_axis_angle(np.array([0, 0, 1]) * np.radians(rotation[2])) + mesh.rotate(R_z,center=mesh_center) + + displacement = [0.0, 0.0, 0.0] + displacement = np.asarray(displacement) + mesh.translate(displacement) + + meshes.append(mesh) + + # print(f"已加载并变换: {obj_path}") + + return meshes + +def compute_mesh_center(vertices): + if len(vertices) == 0: + raise ValueError("顶点数组不能为空") + + n = len(vertices) # 顶点数量 + # 初始化坐标累加器 + sum_x, sum_y, sum_z = 0.0, 0.0, 0.0 + + # 遍历所有顶点累加坐标值 + for vertex in vertices: + sum_x += vertex[0] + sum_y += 
vertex[1] + sum_z += vertex[2] + + # 计算各坐标轴的平均值 + centroid = np.array([sum_x / n, sum_y / n, sum_z / n]) + return centroid + +def load_and_show(base_path, blank_dir, json_name="3DPrintLayout.json"): + # 加载并变换所有模型 + transformed_meshes = load_and_transform_models(base_path, {}, blank_dir, json_name) + + if not transformed_meshes: + print("没有加载到任何模型,请检查错误信息") + else: + print(f"成功加载并变换了 {len(transformed_meshes)} 个模型") + + # 可视化所有模型 + print("显示所有模型... (按'Q'退出)") + + try: + from packaging import version + o3d_version = version.parse(o3d.__version__) + # 新版本 draw_geometries 参数 + if o3d_version >= version.parse("0.13.0"): + o3d.visualization.draw_geometries( + transformed_meshes, + window_name="模型展示", + mesh_show_back_face=True, + mesh_show_wireframe=False + ) + # 旧版本 draw_geometries 参数 + else: + o3d.visualization.draw_geometries( + transformed_meshes, + window_name="模型展示", + point_show_normal=False, + mesh_show_back_face=True + ) + except Exception as e: + print(f"使用 draw_geometries 可视化失败: {e}") + +def setup_orthographic_camera(vis, meshes, ortho_width=15.0, camera_height=20.0): + """ + 设置精确的正交投影相机 + """ + view_control = vis.get_view_control() + + # 计算场景边界框以确定合适的正交参数 + all_points = [] + for mesh in meshes: + if hasattr(mesh, 'vertices'): + points = np.asarray(mesh.vertices) + else: + points = np.asarray(mesh.points) + all_points.append(points) + + if all_points: + all_points = np.vstack(all_points) + bbox_min = np.min(all_points, axis=0) + bbox_max = np.max(all_points, axis=0) + scene_center = (bbox_min + bbox_max) / 2 + scene_size = np.max(bbox_max - bbox_min) + + # 设置观察点为场景中心 + view_control.set_lookat(scene_center) + + # 设置相机在场景上方,俯视场景 + view_control.set_front([0, 0, -1]) # 看向负Z轴(从上向下) + view_control.set_up([0, 1, 0]) # Y轴向上 + + try: + # 启用正交投影 + view_control.set_orthogonal(True) + # 根据场景大小设置正交投影宽度 + view_control.set_orthogonal_width(max(scene_size * 1.2, ortho_width)) + print(f"正交投影已设置: 宽度={max(scene_size * 1.2, ortho_width):.2f}") + except AttributeError: + # 回退到透视投影模拟 + view_control.set_zoom(0.1) + print("使用透视投影模拟正交效果") + + return True + +def auto_fit_to_view(vis, meshes): + """ + 自动调整视图以显示所有模型 + """ + view_control = vis.get_view_control() + + # 方法1: 使用 Open3D 的自动适配功能 + try: + view_control.fit_to_geometry(meshes) + print("已自动适配视图以显示所有模型") + return True + except: + pass + + # 方法2: 手动计算并设置 + all_points = [] + for mesh in meshes: + if hasattr(mesh, 'vertices'): + points = np.asarray(mesh.vertices) + elif hasattr(mesh, 'points'): + points = np.asarray(mesh.points) + else: + continue + all_points.append(points) + + if all_points: + all_points = np.vstack(all_points) + bbox_min = np.min(all_points, axis=0) + bbox_max = np.max(all_points, axis=0) + scene_center = (bbox_min + bbox_max) / 2 + scene_size = np.max(bbox_max - bbox_min) + + # 设置合适的视角和缩放 + view_control.set_lookat(scene_center) + + # 根据场景大小设置 zoom + zoom_level = max(0.05, min(1.0, 10.0 / scene_size)) + zoom_level = 0.5 + view_control.set_zoom(zoom_level) + + print(f"手动适配视图: 场景大小 {scene_size:.2f}, zoom {zoom_level:.3f}") + return True + + return False + +def set_orthographic_camera(view_control, desired_width=1920, desired_height=1080): + """ + 通过相机参数设置正交投影,并确保尺寸匹配 + """ + + # 更可靠的方式:使用你创建窗口时已知的尺寸 + # 假设你创建窗口时使用的是 desired_width 和 desired_height + param = view_control.convert_to_pinhole_camera_parameters() + + # 修改内参,确保使用与创建窗口时一致的尺寸 + param.intrinsic.set_intrinsics( + width=desired_width, # 必须与create_window的width一致 + height=desired_height, # 必须与create_window的height一致 + fx=1000.0, # 对于正交投影,此值意义不同,但仍需合理设置 + fy=1000.0, # 避免使用1.0这样的极端值 + 
cx=desired_width / 2, + cy=desired_height / 2 + ) + + # 同时,仍需通过ViewControl启用正交投影 + view_control.set_orthogonal(True) + view_control.convert_from_pinhole_camera_parameters(param) + +def set_orthographic_projection(view_control, ortho_scale=10.0): + """ + 尝试使用 ViewControl 接口配置正交投影效果。 + + 参数: + vis: Open3D 可视化器实例 + ortho_scale: 控制正交投影的“视野”大小,值越大,场景中的物体看起来越小。 + """ + + try: + # 1. 尝试设置一个非常小的视野(Field of View)来减少透视感 + # 注意:此方法可能在某些版本中效果不明显,但它是可用的最接近正交投影的直接控制 + view_control.change_field_of_view(step=-5) # 尝试将FOV调到极小[1](@ref) + + # 2. 设置固定的近、远裁剪平面,避免因自动计算带来的透视变形 + # 这对于保持缩放时物体大小一致很重要 + view_control.set_constant_z_near(0.1) # 设置一个固定的近平面 + view_control.set_constant_z_far(1000.0) # 设置一个固定的远平面[1](@ref) + + # 3. 使用一个较小的缩放值(Zoom),这有助于让视角更“平行” + # 在正交投影的模拟中,通常使用较大的zoom值(>1)来拉远相机,减少透视效果。 + # 但根据实际测试,您可能需要尝试一个具体的值,例如 0.5 或 0.1 + view_control.set_zoom(0.46) # 这个值需要根据你的场景进行调整[6,7](@ref) + + # print("已尝试通过 ViewControl 配置正交投影参数。") + return True + + except Exception as e: + print(f"在配置 ViewControl 参数时出错: {e}") + return False + +def set_orthographic(meshes, output_path, width=1920, height=1080, + background_color=[1, 1, 1], camera_position=None, + ortho_width=None, zoom=1.0): + + vis = o3d.visualization.Visualizer() + vis.create_window(width=width, height=height, visible=False) + + # 添加几何体 + for mesh in meshes: + vis.add_geometry(mesh) + + # 设置渲染选项 + render_opt = vis.get_render_option() + render_opt.background_color = np.asarray(background_color) + render_opt.mesh_show_back_face = True + render_opt.mesh_show_wireframe = False + render_opt.point_size = 3.0 + + # 视角控制 + view_control = vis.get_view_control() + + # 计算所有网格的合并边界框,用于自适应设置投影参数 + all_points = [] + for mesh in meshes: + if hasattr(mesh, 'vertices'): + points = np.asarray(mesh.vertices) + elif hasattr(mesh, 'points'): + points = np.asarray(mesh.points) + else: + continue + if len(points) > 0: + all_points.append(points) + + if len(all_points) > 0: + all_points = np.vstack(all_points) + bbox_min = np.min(all_points, axis=0) + bbox_max = np.max(all_points, axis=0) + bbox_center = (bbox_min + bbox_max) / 2.0 + bbox_size = np.max(bbox_max - bbox_min) + + # 设置相机视角,看向场景中心 + if not camera_position: + # 默认顶视图 + view_control.set_front([0, 0, 1]) + view_control.set_lookat(bbox_center) + view_control.set_up([0, 1, 0]) + else: + view_control.set_front(camera_position['front']) + view_control.set_lookat(camera_position['lookat']) + view_control.set_up(camera_position['up']) + + # 自适应设置正交投影宽度,考虑屏幕宽高比 + if ortho_width is None: + aspect_ratio = width / height + ortho_width = bbox_size * 1.5 + # 根据宽高比调整,确保场景适合窗口 + if aspect_ratio > 1: + ortho_width *= aspect_ratio + if ortho_width <= 0: + ortho_width = 10.0 + else: + # 如果没有顶点,使用默认值 + if not camera_position: + view_control.set_front([0, 0, 1]) + view_control.set_lookat([0, 0, 0]) + view_control.set_up([0, 1, 0]) + ortho_width = ortho_width or 10.0 + + for _ in range(2): + vis.poll_events() + vis.update_renderer() + + # 设置正交投影 + try: + set_orthographic_camera(view_control) + except AttributeError: + set_orthographic_projection(view_control, ortho_scale=15.0) + + for _ in range(5): + vis.poll_events() + vis.update_renderer() + + return vis + +def render_to_texture(meshes, output_path, width=1920, height=1080, + background_color=[1, 1, 1], camera_position=None, + ortho_width=None, zoom=1.0): + + + vis = set_orthographic(meshes, output_path) + + # 渲染并保存 + vis.capture_screen_image(output_path, do_render=True) + + print(f"高级渲染图片已保存到: {output_path}") + + return vis + +def load_show_save(base_path, dict_origin, blank_path, 
batch_id, is_show=False): + + folder_name = batch_id + + # base_path = f"{print_factory_type_dir}/{folder_name}/" # 替换为实际路径 + #json_name = "3DPrintLayout.json" + json_name = f"{batch_id}.json" + output_image_path = os.path.join(base_path, f"{folder_name}.jpg") + + # 加载并变换所有模型 + transformed_meshes = load_and_transform_models(base_path, dict_origin, blank_path, json_name) + + if not transformed_meshes: + print("没有加载到任何模型,请检查错误信息") + else: + print(f"成功加载并变换了 {len(transformed_meshes)} 个模型") + + render_to_texture(transformed_meshes, output_image_path, background_color=[0.9, 0.9, 0.9]) + + if is_show: + # 可视化所有模型 + print("显示所有模型... (按'Q'退出)") + + vis = o3d.visualization.Visualizer() + vis.create_window(window_name="正交投影模型展示") + + # 添加所有网格到可视化器 + for mesh in transformed_meshes: + vis.add_geometry(mesh) + + vis.poll_events() + vis.update_renderer() + + # 获取渲染选项并设置 + render_option = vis.get_render_option() + render_option.background_color = np.array([0.9, 0.9, 0.9]) # 浅灰色背景 + render_option.mesh_show_back_face = True + render_option.mesh_show_wireframe = False + + # 更新渲染器 + vis.update_renderer() + + # 运行可视化 + vis.run() + vis.destroy_window() + +# 主程序 +if __name__ == "__main__": + + parser = argparse.ArgumentParser() + parser.add_argument("--batch_id", type=str, required=True, help="batch_id") + args = parser.parse_args() + + # batch_id = args.batch_id + batch_id = "9" # 1113-MY-4 + + print_factory_type_dir="/root/print_factory_type" + base_path = f"{print_factory_type_dir}/{batch_id}/" + # blank_path = "{print_factory_type_dir}/blank/blank_bias/blank2.obj" + blank_path = f"{print_factory_type_dir}/blank/blank_bias/blank_small.obj" + + load_show_save(base_path, {}, blank_path, batch_id, True) + \ No newline at end of file diff --git a/x_y_min_test.py b/x_y_min_test.py new file mode 100644 index 0000000..bca4e4d --- /dev/null +++ b/x_y_min_test.py @@ -0,0 +1,52 @@ +import os +import shutil +import time +import random +import matplotlib.pyplot as plt +import open3d as o3d +import numpy as np + + + +# ply_read_path="/data/datasets_20t/type_setting_test_data/print_bounds_compact_data/88884_253283_P65951_6cm_x1=7.811+11.043+25.699.ply" +# # 读取点云 +# pcd = o3d.io.read_point_cloud(ply_read_path) +# +# # 获取点云的点数据 +# points = np.asarray(pcd.points) +# +# # 计算质心 +# centroid = np.mean(points, axis=0) +# +# # 计算 Y 轴最小值 +# min_y_value = np.min(points[:, 1]) # Y 轴最小值 +# max_y_value = np.max(points[:, 1]) +# +# # 计算 X 轴最小值 +# min_x_value = np.min(points[:, 0]) # X 轴最小值 +# +# print(f'min_x_value{min_x_value}') +# min_x_value -385.08287729332403 +# +ply_read_path="/data/datasets_20t/type_setting_test_data/print_bounds_compact_data/456450_260316_P65976_2.66cm_x1=21.778+22.904+26.333.ply" +# 读取点云 +pcd = o3d.io.read_point_cloud(ply_read_path) + +# 获取点云的点数据 +points = np.asarray(pcd.points) + +# 计算质心 +centroid = np.mean(points, axis=0) + +# 计算 Y 轴最小值 +min_y_value = np.min(points[:, 1]) # Y 轴最小值 +max_y_value = np.max(points[:, 1]) + +# 计算 X 轴最小值 +min_x_value = np.min(points[:, 0]) # X 轴最小值 + +print(f'min_x_value{min_x_value}') +# min_x_value -385.08287729332403 +print(f'min_y_value{min_y_value}') + +# -339 \ No newline at end of file diff --git a/读写时间测试.py b/读写时间测试.py new file mode 100644 index 0000000..54e0813 --- /dev/null +++ b/读写时间测试.py @@ -0,0 +1,152 @@ +# import numpy as np +# def calculate_rotation_and_center_of_mass(angle_x, angle_y, angle_z, points): +# """计算某一组旋转角度后的重心""" +# # 计算绕X轴、Y轴和Z轴的旋转矩阵 +# R_x = np.array([ +# [1, 0, 0], +# [0, np.cos(np.radians(angle_x)), -np.sin(np.radians(angle_x))], +# [0, 
np.sin(np.radians(angle_x)), np.cos(np.radians(angle_x))] +# ]) +# +# R_y = np.array([ +# [np.cos(np.radians(angle_y)), 0, np.sin(np.radians(angle_y))], +# [0, 1, 0], +# [-np.sin(np.radians(angle_y)), 0, np.cos(np.radians(angle_y))] +# ]) +# +# R_z = np.array([ +# [np.cos(np.radians(angle_z)), -np.sin(np.radians(angle_z)), 0], +# [np.sin(np.radians(angle_z)), np.cos(np.radians(angle_z)), 0], +# [0, 0, 1] +# ]) +# +# # 综合旋转矩阵 +# R = R_z @ R_y @ R_x +# +# # 执行旋转 +# rotated_points = points @ R.T +# +# # 计算最小z值 +# min_z = np.min(rotated_points[:, 2]) +# +# # 计算平移向量,将最小Z值平移到0 +# translation_vector = np.array([0, 0, -min_z]) +# rotated_points += translation_vector +# +# # 计算重心 +# center_of_mass = np.mean(rotated_points, axis=0) +# +# return center_of_mass[2], angle_x, angle_y, angle_z +# +# def parallel_rotation(points, angle_step=3): +# """顺序计算最优旋转角度(单线程)""" +# min_center_of_mass_y = float('inf') +# best_angle_x, best_angle_y, best_angle_z = 0, 0, 0 +# +# # 遍历所有角度组合 +# for angle_x in range(0, 360, angle_step): +# for angle_y in range(0, 360, angle_step): +# for angle_z in range(0, 360, angle_step): +# center_of_mass_z, ax, ay, az = calculate_rotation_and_center_of_mass( +# angle_x, angle_y, angle_z, points +# ) +# if center_of_mass_z < min_center_of_mass_y: +# min_center_of_mass_y = center_of_mass_z +# best_angle_x, best_angle_y, best_angle_z = ax, ay, az +# +# return best_angle_x, best_angle_y, best_angle_z, min_center_of_mass_y + + +import bpy +import time +import os +from pathlib import Path + + +def clear_scene(): + """清空当前场景中的所有对象""" + bpy.ops.object.select_all(action='SELECT') + bpy.ops.object.delete() + + +def test_bpy_io(obj_path, output_path): + """测试 bpy 的 OBJ 读写性能""" + # 读取 OBJ + start_time = time.time() + bpy.ops.import_scene.obj(filepath=obj_path) + read_time = time.time() - start_time + + # 确保场景中有对象 + if not bpy.context.scene.objects: + raise ValueError("未成功导入 OBJ 文件") + + # 写入 OBJ + start_time = time.time() + bpy.ops.export_scene.obj( + filepath=output_path, + use_selection=False, # 导出所有对象 + use_materials=False, # 不导出材质(加快速度) + ) + write_time = time.time() - start_time + + # 清理场景 + clear_scene() + + return write_time, read_time + + +def test_folder_objs_with_bpy(folder_path, output_folder="output_objs_bpy"): + """测试文件夹中所有 OBJ 文件的读写性能(使用 bpy)""" + # 确保输出文件夹存在 + Path(output_folder).mkdir(exist_ok=True) + + # 收集所有 OBJ 文件 + obj_files = [f for f in os.listdir(folder_path) if f.lower().endswith('.obj')] + + if not obj_files: + print(f"在文件夹 {folder_path} 中未找到 OBJ 文件") + return + + print(f"找到 {len(obj_files)} 个 OBJ 文件,开始测试...") + + results = [] + + for obj_file in obj_files: + input_path = os.path.join(folder_path, obj_file) + output_path = os.path.join(output_folder, f"bpy_{obj_file}") + + print(f"\n测试文件: {obj_file}") + + try: + write_time, read_time = test_bpy_io(input_path, output_path) + file_size = os.path.getsize(input_path) / (1024 * 1024) # MB + + print(f" 文件大小: {file_size:.2f} MB") + print(f" bpy 读取时间: {read_time:.3f}s") + print(f" bpy 写入时间: {write_time:.3f}s") + + results.append({ + "filename": obj_file, + "size_mb": file_size, + "read_time": read_time, + "write_time": write_time, + }) + + except Exception as e: + print(f" 处理 {obj_file} 时出错: {e}") + + # 计算平均时间 + if results: + avg_read = sum(r["read_time"] for r in results) / len(results) + avg_write = sum(r["write_time"] for r in results) / len(results) + print("\n=== 汇总结果 ===") + print(f"平均读取时间: {avg_read:.3f}s") + print(f"平均写入时间: {avg_write:.3f}s") + + +if __name__ == "__main__": + # 设置包含OBJ文件的文件夹路径 + obj_folder = 
"/data/datasets_20t/9_big/" # 替换为你的OBJ文件夹路径 + + # 运行测试 + test_folder_objs_with_bpy(obj_folder) diff --git a/读写时间测试2.py b/读写时间测试2.py new file mode 100644 index 0000000..25bc71a --- /dev/null +++ b/读写时间测试2.py @@ -0,0 +1,98 @@ +import os +import time +import open3d as o3d +import trimesh +from pathlib import Path + + +def test_folder_objs(folder_path, output_folder="output_objs"): + """测试文件夹中所有OBJ文件的读写性能""" + # 确保输出文件夹存在 + Path(output_folder).mkdir(exist_ok=True) + + # 收集文件夹中所有OBJ文件 + obj_files = [f for f in os.listdir(folder_path) if f.lower().endswith('.obj')] + + if not obj_files: + print(f"在文件夹 {folder_path} 中未找到OBJ文件") + return + + print(f"找到 {len(obj_files)} 个OBJ文件,开始测试...") + + # 准备结果记录 + results = [] + + for obj_file in obj_files: + file_path = os.path.join(folder_path, obj_file) + output_path = os.path.join(output_folder, obj_file) + + print(f"\n测试文件: {obj_file}") + + # 测试open3d + o3d_write, o3d_read = test_open3d_io(file_path, output_path.replace('.obj', '_o3d.obj')) + + # 测试trimesh + tm_write, tm_read = test_trimesh_io(file_path, output_path.replace('.obj', '_tm.obj')) + + # 记录结果 + file_stats = { + 'filename': obj_file, + 'o3d_write': o3d_write, + 'o3d_read': o3d_read, + 'tm_write': tm_write, + 'tm_read': tm_read, + 'write_ratio': o3d_write / tm_write if tm_write > 0 else 0, + 'read_ratio': o3d_read / tm_read if tm_read > 0 else 0 + } + results.append(file_stats) + + # 打印当前文件结果 + print(f" open3d | 写入: {o3d_write:.3f}s | 读取: {o3d_read:.3f}s") + print(f" trimesh | 写入: {tm_write:.3f}s | 读取: {tm_read:.3f}s") + print(f" 写入速度比(trimesh/open3d): {file_stats['write_ratio']:.1f}x") + print(f" 读取速度比(trimesh/open3d): {file_stats['read_ratio']:.1f}x") + + # 打印汇总结果 + print("\n=== 汇总结果 ===") + avg_write_ratio = sum(r['write_ratio'] for r in results) / len(results) + avg_read_ratio = sum(r['read_ratio'] for r in results) / len(results) + print(f"平均写入速度比(trimesh/open3d): {avg_write_ratio:.1f}x") + print(f"平均读取速度比(trimesh/open3d): {avg_read_ratio:.1f}x") + + +def test_open3d_io(input_path, output_path): + """测试open3d的读写性能""" + # 读取 + start = time.time() + mesh = o3d.io.read_triangle_mesh(input_path) + read_time = time.time() - start + + # 写入 + start = time.time() + o3d.io.write_triangle_mesh(output_path, mesh) + write_time = time.time() - start + + return write_time, read_time + + +def test_trimesh_io(input_path, output_path): + """测试trimesh的读写性能""" + # 读取 + start = time.time() + mesh = trimesh.load(input_path) + read_time = time.time() - start + + # 写入 + start = time.time() + mesh.export(output_path) + write_time = time.time() - start + + return write_time, read_time + + +if __name__ == "__main__": + # 设置包含OBJ文件的文件夹路径 + obj_folder = "/data/datasets_20t/9_big/" # 替换为你的OBJ文件夹路径 + + # 运行测试 + test_folder_objs(obj_folder) \ No newline at end of file