From 1262b65412505ad12492eb7ece9215d753a5c77d Mon Sep 17 00:00:00 2001
From: dongchangxi <458593490@qq.com>
Date: Thu, 27 Nov 2025 21:05:10 +0800
Subject: [PATCH] Bug fixes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 script/factory_sliceing/download_print_out.py | 29 ++++++--
 script/factory_sliceing/type_setting_order.py | 69 ++++++++++++++++---
 2 files changed, 84 insertions(+), 14 deletions(-)

diff --git a/script/factory_sliceing/download_print_out.py b/script/factory_sliceing/download_print_out.py
index b742b80..4fd634c 100644
--- a/script/factory_sliceing/download_print_out.py
+++ b/script/factory_sliceing/download_print_out.py
@@ -55,18 +55,31 @@ class DataTransfer:
 
         # List all objects
         objects = []
         prefix = self.oss_path.lstrip('/')  # strip the leading '/' to match the OSS key format
-
+        ishaveCodeJpg = False
         for obj in oss2.ObjectIterator(self.oss_client, prefix=prefix):
             if obj.key != prefix:  # skip the directory itself
-                objects.append(obj.key)
+                print(f"obj.key={obj.key}")
+                if ".jpg" in obj.key:
+                    if obj.key.endswith(f"printId_{json_model_info.print_id}Tex1.jpg"):
+                        objects.append(obj.key)
+                        ishaveCodeJpg = True
+                else:
+                    objects.append(obj.key)
+
+        if not ishaveCodeJpg:
+            for obj in oss2.ObjectIterator(self.oss_client, prefix=prefix):
+                if obj.key == f"{json_model_info.pid}Tex1.jpg":
+                    objects.append(obj.key)
+                    break
+        print(f"objects={objects}")
 
         # Download all files, with a progress bar
         for obj_key in tqdm(objects, desc="Downloading"):
             if obj_key.endswith('/'):
                 continue
-            if "printId" in obj_key:
-                continue
+            # if "printId" in obj_key:
+            #     continue
 
             # Compute the relative path
             rel_path = obj_key[len(prefix):].lstrip('/')
@@ -76,6 +89,9 @@ class DataTransfer:
 
             # Rename based on the file extension
             if file_ext.lower() in ['.mtl', '.jpg', '.jpeg', '.png']:
+                if file_name.endswith(f"printId_{json_model_info.print_id}Tex1.jpg"):
+                    file_name = file_name.replace(f"printId_{json_model_info.print_id}Tex1.jpg", f"{json_model_info.pid}Tex1.jpg")
+
                 # For .mtl and image files, prefix the original name with order_id
                 new_file_name = f"{json_model_info.order_id}_{file_name}"
                 # new_file_name = file_name
@@ -186,6 +202,7 @@ class JSONModelInfo:
     order_id: str
     pid: str
     model_height: str
+    print_id: str
 
 def read_pids_from_json(pid_file):
     """Read all PIDs from the file"""
@@ -230,6 +247,7 @@ def read_pids_from_json(pid_file):
 
             parts = obj_name.split('_')
             order_id = parts[0]
+            print_id = parts[2].replace("P", "")
             pid = parts[1]
             model_height = parts[3]
 
@@ -237,7 +255,8 @@ def read_pids_from_json(pid_file):
                 obj_name=obj_name,
                 order_id=order_id,
                 pid=pid,
-                model_height=model_height
+                model_height=model_height,
+                print_id=print_id
             )
             list_model_info.append(model_info)
 
diff --git a/script/factory_sliceing/type_setting_order.py b/script/factory_sliceing/type_setting_order.py
index 611da4d..d45dbf8 100644
--- a/script/factory_sliceing/type_setting_order.py
+++ b/script/factory_sliceing/type_setting_order.py
@@ -53,7 +53,40 @@ class RedisClientSingleton:
 
     def get_client(self):
         if self._client is None:
-            self._client = redis.Redis(host='mp.api.suwa3d.com', password='kcV2000', port=6379, db=6)
+            # Add timeout parameters to guard against connection hangs
+            # socket_timeout: per-operation timeout (seconds)
+            # socket_connect_timeout: connection timeout (seconds)
+            # socket_keepalive: enable TCP keepalive
+            # socket_keepalive_options: keepalive options
+            self._client = redis.Redis(
+                host='mp.api.suwa3d.com',
+                password='kcV2000',
+                port=6379,
+                db=6,
+                socket_timeout=30,  # 30-second operation timeout
+                socket_connect_timeout=10,  # 10-second connect timeout
+                socket_keepalive=True,  # enable TCP keepalive
+                socket_keepalive_options={},  # keepalive options
+                health_check_interval=30  # health check every 30 seconds
+            )
+        else:
+            # Check that the connection is still alive; reconnect if it has dropped
+            try:
+                self._client.ping()
+            except (redis.ConnectionError, redis.TimeoutError, AttributeError):
+                # Connection lost, recreate the client
+                self._client = None
+                self._client = redis.Redis(
+                    host='mp.api.suwa3d.com',
+                    password='kcV2000',
+                    port=6379,
+                    db=6,
+                    socket_timeout=30,
+                    socket_connect_timeout=10,
+                    socket_keepalive=True,
+                    socket_keepalive_options={},
+                    health_check_interval=30
+                )
         return self._client
 
 
@@ -67,12 +100,20 @@ def redisClient():
 def requestApiToUpdateSliceStatus(versionId,downloadCounts):
     api_url = f"{url}/api/printTypeSettingOrder/updateBatchSliceing?batch_id={versionId}&download_counts="+str(downloadCounts)
     print(f'Sending status update request, url={api_url}, versionId={versionId}')
-    res = requests.post(api_url)
-    if res.status_code != 200:
-        print(f'Status update request failed, res={res.text}')
+    try:
+        # Add a timeout so the request cannot hang indefinitely
+        res = requests.post(api_url, timeout=60)  # 60-second timeout
+        if res.status_code != 200:
+            print(f'Status update request failed, res={res.text}')
+            return False
+        print(f'Status update request succeeded, res={res.text}')
+        return True
+    except requests.exceptions.Timeout:
+        print(f'Status update request timed out, url={api_url}')
+        return False
+    except requests.exceptions.RequestException as e:
+        print(f'Status update request raised an exception, error={str(e)}')
         return False
-    print(f'Status update request succeeded, res={res.text}')
-    return True
 
 # Check whether the JSON file has been uploaded
 def step1(versionId):
@@ -144,6 +185,7 @@ def step2(jsonFilePath,folder, versionId):
     # Call the function directly instead of running a python command via os.system
     # This avoids the "python command not found" problem after packaging into an exe
     workdir = os.path.join(folder, 'data')
+    print(f"Model data directory={workdir}, jsonFilePath={jsonFilePath}")
     download_transform_save_by_json(jsonFilePath, workdir, '')
 
 
@@ -186,11 +228,11 @@ def main(work_dir=None):
             versionId = str(data)
             print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} Processing batch ID={versionId}')
 
-            # Before running, delete any leftover data directories (both _big and _small)
+            # Before running, delete any leftover data directories (both _big and _small), removing the whole directory
             for suffix in ['_big', '_small']:
-                objFilePath = os.path.join(currentDir, 'batchPrint', versionId + suffix, 'data')
+                objFilePath = os.path.join(currentDir, 'batchPrint', versionId + suffix)
                 if os.path.exists(objFilePath):
-                    shutil.rmtree(objFilePath)
+                    shutil.rmtree(objFilePath, ignore_errors=True)
 
             res = step1(versionId)
             if res == False:
@@ -215,6 +257,15 @@ def main(work_dir=None):
                     if file.endswith('.obj'):
                         objCounts += 1
             requestApiToUpdateSliceStatus(versionId,objCounts)
+
+            # After the long-running work, make sure the Redis connection is still alive
+            # Re-acquiring the client triggers the connection check
+            try:
+                r = redisClient()
+                r.ping()  # test the connection
+            except Exception as e:
+                print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} Redis connection check failed: {str(e)}; it will reconnect automatically on the next loop')
+
             #time.sleep(10)
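
Note on the naming convention the rename logic relies on: read_pids_from_json splits obj_name on underscores into order_id, pid, a "P"-prefixed print_id, and model_height, and download_print_out.py then rewrites f"printId_{print_id}Tex1.jpg" textures to f"{pid}Tex1.jpg" before prefixing them with the order id. The sketch below restates that mapping as a standalone snippet for quick checking; ParsedObjName, parse_obj_name, and the example object name are purely illustrative assumptions, not part of the patch.

from dataclasses import dataclass

@dataclass
class ParsedObjName:
    order_id: str
    pid: str
    print_id: str
    model_height: str

def parse_obj_name(obj_name: str) -> ParsedObjName:
    # Same positional split as read_pids_from_json:
    # "<order_id>_<pid>_P<print_id>_<model_height>"
    parts = obj_name.split('_')
    return ParsedObjName(
        order_id=parts[0],
        pid=parts[1],
        print_id=parts[2].replace("P", ""),
        model_height=parts[3],
    )

# Hypothetical object name following the assumed convention.
info = parse_obj_name("1001_abc123_P42_180")
assert info.print_id == "42"

# The downloaded texture f"printId_{info.print_id}Tex1.jpg" ends up as
# f"{info.order_id}_{info.pid}Tex1.jpg" after the rename and order-id prefix.
print(f"{info.order_id}_{info.pid}Tex1.jpg")  # 1001_abc123Tex1.jpg

If the convention ever gains extra underscore-separated fields, the positional indexing here and in read_pids_from_json would both need adjusting, since split('_') has no notion of which field is which.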