
Intranet service

dongchangxi committed to master, 6 days ago
commit 7053969422
  1. factory_sliceing/auto_sliceing_operate/main_begin_sliceing.py (71 changed lines)
  2. factory_sliceing/auto_sliceing_operate/main_download_zip.py (5 changed lines)
  3. factory_sliceing/auto_sliceing_operate/utils/oss_redis_intranet.py (130 changed lines)
  4. factory_sliceing/config.toml (10 changed lines)
  5. factory_sliceing/download_batch_data/main_download_batch_data_and_trans.py (13 changed lines)
  6. factory_sliceing/download_batch_data/utils/oss_redis_intranet.py (130 changed lines)

factory_sliceing/auto_sliceing_operate/main_begin_sliceing.py (71 changed lines)

@@ -2,6 +2,7 @@ from auto_sliceing_operate.utils.import_all_file import modify_file_dialog_path_
from auto_sliceing_operate.utils.click_soft_button import clickFileIMportShow, clickBegingSlice,checkIsSliceingText
import time
from auto_sliceing_operate.utils.oss_redis import redisClient
from auto_sliceing_operate.utils.oss_redis_intranet import redisClientIntranet
from auto_sliceing_operate.utils.exe_operate import start_exe, click_confirm, close
from auto_sliceing_operate.utils.logs import log
import os
@@ -70,44 +71,44 @@ def BeginSliceing(batchId,machineId, folderPath,data):
# After slicing completes, package the slice files into a zip, upload it to OSS, and call the API to update the batch status to "slicing complete"
# requestApiToUpdateSliceStatusComplate(batchId, 0)
# sadd the item into the corresponding queue
r = redisClient()
r.sadd('pb:begin_print_machine_'+machineId, data)
rIntranet = redisClientIntranet()
rIntranet.sadd('pb:begin_print_machine_'+machineId, data)
def main(work_dir=None):
r = redisClient()
while True:
# def main(work_dir=None):
# r = redisClient()
# while True:
# Check whether the queue connection is healthy; reconnect if not
if not r.ping():
log("队列连接异常,进行重连")
r = redisClient()
time.sleep(10)
continue
# # Check whether the queue connection is healthy; reconnect if not
# if not r.ping():
# log("队列连接异常,进行重连")
# r = redisClient()
# time.sleep(10)
# continue
data = r.spop('pb:to_sliceing')
if data is None:
log("队列为空")
time.sleep(10)
continue
data = data.decode('utf-8')
# data = "10192_small_No4"
# Check that the value is a string
if not isinstance(data, str):
log("取出的数据不是字符串")
time.sleep(10)
continue
folderPath = os.path.join(work_dir,"batchPrint", data,"data")
# Check that the directory exists
if not os.path.exists(folderPath):
log(f"目录不存在: {folderPath}")
time.sleep(10)
continue
# data = r.spop('pb:to_sliceing')
# if data is None:
# log("队列为空")
# time.sleep(10)
# continue
# data = data.decode('utf-8')
# # data = "10192_small_No4"
# # Check that the value is a string
# if not isinstance(data, str):
# log("取出的数据不是字符串")
# time.sleep(10)
# continue
# folderPath = os.path.join(work_dir,"batchPrint", data,"data")
# # Check that the directory exists
# if not os.path.exists(folderPath):
# log(f"目录不存在: {folderPath}")
# time.sleep(10)
# continue
# data format: batchId_machineType_No{machineId}, e.g. 10192_small_No4
batchId = data.split('_')[0] # batch ID
# machineType = data.split('_')[1] # machine type
machineId = data.split('_')[2].replace('No', '') # machine ID
BeginSliceing(batchId,machineId,folderPath,data)
time.sleep(10)
# # data format: batchId_machineType_No{machineId}
# batchId = data.split('_')[0] # batch ID
# # machineType = data.split('_')[1] # machine type
# machineId = data.split('_')[2].replace('No', '') # machine ID
# BeginSliceing(batchId,machineId,folderPath,data)
# time.sleep(10)
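The hunk above is the hand-off point: once a batch is sliced, its key is pushed onto the per-machine print queue through the new intranet Redis client. A minimal sketch of that step, assuming the 'pb:begin_print_machine_<machineId>' key scheme shown in the diff; notify_print_machine is an illustrative name, not part of the codebase.

from auto_sliceing_operate.utils.oss_redis_intranet import redisClientIntranet

def notify_print_machine(machine_id: str, data: str) -> None:
    """Push a sliced batch key onto the per-machine print queue (intranet Redis)."""
    r_intranet = redisClientIntranet()
    # SADD is a set insert, so notifying the same batch twice is harmless.
    r_intranet.sadd('pb:begin_print_machine_' + machine_id, data)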

factory_sliceing/auto_sliceing_operate/main_download_zip.py (5 changed lines)

@@ -2,6 +2,7 @@ from auto_sliceing_operate.utils import miniIo as mio
import zipfile,time
import os
from auto_sliceing_operate.utils.oss_redis import redisClient
from auto_sliceing_operate.utils.oss_redis_intranet import redisClientIntranet
from auto_sliceing_operate.utils.logs import log
from utils.config import cfg
# The computer next to the printer downloads the slice files to local disk
@@ -42,14 +43,14 @@ def main(work_dir=None):
os.makedirs(downloadPath)
r = redisClient()
rIntranet = redisClientIntranet()
machineId = getCurrentMachineId()
if machineId is None:
log("获取当前电脑id失败")
exit(0)
while True:
data = r.spop('pb:begin_print_machine_'+machineId)
data = rIntranet.spop('pb:begin_print_machine_'+machineId)
if data is None:
log("队列为空")
time.sleep(10)
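The consumer side: main_download_zip.py now pops work from the same intranet queue. A minimal sketch of that polling loop under the same key-scheme assumption; poll_print_queue is an illustrative name, and the decode reflects that redis-py returns bytes unless decode_responses is set.

import time
from auto_sliceing_operate.utils.oss_redis_intranet import redisClientIntranet

def poll_print_queue(machine_id: str, idle_sleep: int = 10):
    """Yield batch keys for this machine as they appear on the intranet queue."""
    r_intranet = redisClientIntranet()
    while True:
        data = r_intranet.spop('pb:begin_print_machine_' + machine_id)
        if data is None:
            # Queue is empty; wait and poll again.
            time.sleep(idle_sleep)
            continue
        yield data.decode('utf-8')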

factory_sliceing/auto_sliceing_operate/utils/oss_redis_intranet.py (130 changed lines)

@@ -0,0 +1,130 @@
import oss2,redis
import time
import logging
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# OSS connection (singleton pattern)
class OSSClientSingleton:
_instance = None
_client = None
def __new__(cls):
if cls._instance is None:
cls._instance = super(OSSClientSingleton, cls).__new__(cls)
return cls._instance
def get_client(self):
if self._client is None:
AccessKeyId = 'LTAI5tSReWm8hz7dSYxxth8f'
AccessKeySecret = '8ywTDF9upPAtvgXtLKALY2iMYHIxdS'
Endpoint = 'oss-cn-shanghai.aliyuncs.com'
Bucket = 'suwa3d-securedata'
self._client = oss2.Bucket(oss2.Auth(AccessKeyId, AccessKeySecret), Endpoint, Bucket)
return self._client
def ossClient():
"""获取OSS客户端单例"""
return OSSClientSingleton().get_client()
#连接redis,单例模式
class RedisClientSingleton:
_instance = None
_client = None
# Redis connection settings
REDIS_CONFIG = {
'host': '192.168.100.254',
'password': '',
'port': 6379,
'db': 6,
'socket_timeout': 30, # operation timeout: 30 seconds
'socket_connect_timeout': 10, # connection timeout: 10 seconds
'socket_keepalive': True, # enable TCP keepalive
'socket_keepalive_options': {}, # keepalive options
'health_check_interval': 30 # health check interval: 30 seconds
}
# Retry settings
RETRY_INTERVAL = 5 # retry interval (seconds)
MAX_RETRY_INTERVAL = 60 # maximum retry interval (seconds), used for exponential backoff
def __new__(cls):
if cls._instance is None:
cls._instance = super(RedisClientSingleton, cls).__new__(cls)
return cls._instance
def _create_redis_client(self):
"""创建Redis客户端"""
return redis.Redis(**self.REDIS_CONFIG)
def _connect_with_retry(self, max_retries=None, retry_interval=None):
"""
带重试机制的Redis连接方法
:param max_retries: 最大重试次数None表示无限重试
:param retry_interval: 重试间隔None表示使用默认值支持指数退避
:return: Redis客户端实例
"""
if retry_interval is None:
retry_interval = self.RETRY_INTERVAL
retry_count = 0
current_interval = retry_interval
while True:
try:
logger.info(f"尝试连接Redis (第 {retry_count + 1} 次)...")
client = self._create_redis_client()
# Test the connection
client.ping()
logger.info("Redis连接成功!")
return client
except (redis.ConnectionError, redis.TimeoutError, AttributeError, Exception) as e:
retry_count += 1
error_msg = str(e)
logger.warning(f"Redis连接失败 (第 {retry_count} 次): {error_msg}")
# If a maximum retry count is set and has been reached, re-raise the exception
if max_retries is not None and retry_count >= max_retries:
logger.error(f"达到最大重试次数 {max_retries},停止重试")
raise
# Exponential backoff: increase the wait a little each retry, capped at the maximum
logger.info(f"等待 {current_interval} 秒后重试...")
time.sleep(current_interval)
current_interval = min(current_interval * 1.5, self.MAX_RETRY_INTERVAL)
def get_client(self):
"""
获取Redis客户端如果连接断开则自动重连带重试机制
:return: Redis客户端实例
"""
if self._client is None:
# First connection: retry indefinitely until it succeeds
logger.info("初始化Redis连接...")
self._client = self._connect_with_retry(max_retries=None)
else:
# Check whether the connection is still valid; if it has dropped, reconnect (with retries)
try:
self._client.ping()
except (redis.ConnectionError, redis.TimeoutError, AttributeError) as e:
# Connection dropped: recreate it (retry indefinitely until success)
logger.warning(f"Redis连接已断开: {str(e)},开始重新连接...")
self._client = None
self._client = self._connect_with_retry(max_retries=None)
return self._client
# def redisClient():
# """获取Redis客户端单例"""
# return RedisClientSingleton().get_client()
def redisClientIntranet():
"""获取内网Redis客户端单例"""
return RedisClientSingleton().get_client()
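_connect_with_retry backs off exponentially: waits start at RETRY_INTERVAL (5 s), grow by a factor of 1.5 per failed attempt, and are capped at MAX_RETRY_INTERVAL (60 s). A small sketch reproducing that wait schedule:

RETRY_INTERVAL = 5
MAX_RETRY_INTERVAL = 60

interval, waits = RETRY_INTERVAL, []
for _ in range(8):
    waits.append(interval)
    interval = min(interval * 1.5, MAX_RETRY_INTERVAL)
print(waits)  # [5, 7.5, 11.25, 16.875, 25.3125, 37.96875, 56.953125, 60]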

factory_sliceing/config.toml (10 changed lines)

@@ -11,6 +11,16 @@ socket_connect_timeout = 10
socket_keepalive = true
health_check_interval = 30
[redis_intranet]
host = "192.168.100.254"
password = ""
port = 6379
db = 6
socket_timeout = 30
socket_connect_timeout = 10
socket_keepalive = true
health_check_interval = 30
# Redis retry settings
[redis.retry]
retry_interval = 5
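The new [redis_intranet] block mirrors the values that oss_redis_intranet.py hard-codes in REDIS_CONFIG. A minimal sketch of building the intranet client from this section instead, assuming Python 3.11+ (tomllib) and that the section keys map one-to-one onto redis.Redis keyword arguments, as they do here:

import tomllib  # Python 3.11+
import redis

with open("factory_sliceing/config.toml", "rb") as f:
    cfg = tomllib.load(f)

# Build an intranet client from [redis_intranet]; the keys match redis.Redis kwargs.
intranet_client = redis.Redis(**cfg["redis_intranet"])
intranet_client.ping()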

factory_sliceing/download_batch_data/main_download_batch_data_and_trans.py (13 changed lines)

@@ -1,5 +1,6 @@
import os,time,sys
from download_batch_data.utils.funcs import requestApiToUpdateSliceStatus
from auto_sliceing_operate.main_begin_sliceing import BeginSliceing
from download_batch_data.utils.oss_redis import redisClient
from download_batch_data.utils.funcs import downloadJsonAndJpgFileAndMoveToCorrectDir, downloadDataByOssAndTransformSave
# Default to the directory where the script is located
@@ -54,10 +55,16 @@ def step1(versionId):
keyValue = f'{versionId}_{machineType}_No{machineInfo["id"]}'
# Only small machines are pushed into my slicing queue; large machines have their own slicing queue
if machineType == 'small':
r = redisClient()
r.sadd('pb:to_sliceing', keyValue)
return True
BeginSliceing(versionId,machineInfo["id"],objFilePath,keyValue)
# Call the slicing method to run slicing
# # Intranet Redis
# r = redisClientIntranet()
# r = redisClient()
# r.sadd('pb:to_sliceing', keyValue)
return True
# Read one item from the queue
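One reading of the hunk above: the key has the form {versionId}_{machineType}_No{machineId}; small machines are queued on pb:to_sliceing for the slicing worker, while other machine types are sliced immediately via BeginSliceing. A minimal sketch of that routing under those assumptions; route_batch is an illustrative name, not part of the codebase.

from auto_sliceing_operate.main_begin_sliceing import BeginSliceing

def route_batch(version_id, machine_type, machine_info, obj_file_path, redis_client):
    key_value = f'{version_id}_{machine_type}_No{machine_info["id"]}'
    if machine_type == 'small':
        # Small machines go onto the shared slicing queue, picked up by main_begin_sliceing.
        redis_client.sadd('pb:to_sliceing', key_value)
        return True
    # Other machine types are sliced right away on this host.
    BeginSliceing(version_id, machine_info["id"], obj_file_path, key_value)
    return True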

factory_sliceing/download_batch_data/utils/oss_redis_intranet.py (130 changed lines)

@@ -0,0 +1,130 @@
import oss2,redis
import time
import logging
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# OSS connection (singleton pattern)
class OSSClientSingleton:
_instance = None
_client = None
def __new__(cls):
if cls._instance is None:
cls._instance = super(OSSClientSingleton, cls).__new__(cls)
return cls._instance
def get_client(self):
if self._client is None:
AccessKeyId = 'LTAI5tSReWm8hz7dSYxxth8f'
AccessKeySecret = '8ywTDF9upPAtvgXtLKALY2iMYHIxdS'
Endpoint = 'oss-cn-shanghai.aliyuncs.com'
Bucket = 'suwa3d-securedata'
self._client = oss2.Bucket(oss2.Auth(AccessKeyId, AccessKeySecret), Endpoint, Bucket)
return self._client
def ossClient():
"""获取OSS客户端单例"""
return OSSClientSingleton().get_client()
#连接redis,单例模式
class RedisClientSingleton:
_instance = None
_client = None
# Redis connection settings
REDIS_CONFIG = {
'host': '192.168.100.254',
'password': '',
'port': 6379,
'db': 6,
'socket_timeout': 30, # operation timeout: 30 seconds
'socket_connect_timeout': 10, # connection timeout: 10 seconds
'socket_keepalive': True, # enable TCP keepalive
'socket_keepalive_options': {}, # keepalive options
'health_check_interval': 30 # health check interval: 30 seconds
}
# Retry settings
RETRY_INTERVAL = 5 # retry interval (seconds)
MAX_RETRY_INTERVAL = 60 # maximum retry interval (seconds), used for exponential backoff
def __new__(cls):
if cls._instance is None:
cls._instance = super(RedisClientSingleton, cls).__new__(cls)
return cls._instance
def _create_redis_client(self):
"""创建Redis客户端"""
return redis.Redis(**self.REDIS_CONFIG)
def _connect_with_retry(self, max_retries=None, retry_interval=None):
"""
带重试机制的Redis连接方法
:param max_retries: 最大重试次数None表示无限重试
:param retry_interval: 重试间隔None表示使用默认值支持指数退避
:return: Redis客户端实例
"""
if retry_interval is None:
retry_interval = self.RETRY_INTERVAL
retry_count = 0
current_interval = retry_interval
while True:
try:
logger.info(f"尝试连接Redis (第 {retry_count + 1} 次)...")
client = self._create_redis_client()
# Test the connection
client.ping()
logger.info("Redis连接成功!")
return client
except (redis.ConnectionError, redis.TimeoutError, AttributeError, Exception) as e:
retry_count += 1
error_msg = str(e)
logger.warning(f"Redis连接失败 (第 {retry_count} 次): {error_msg}")
# If a maximum retry count is set and has been reached, re-raise the exception
if max_retries is not None and retry_count >= max_retries:
logger.error(f"达到最大重试次数 {max_retries},停止重试")
raise
# Exponential backoff: increase the wait a little each retry, capped at the maximum
logger.info(f"等待 {current_interval} 秒后重试...")
time.sleep(current_interval)
current_interval = min(current_interval * 1.5, self.MAX_RETRY_INTERVAL)
def get_client(self):
"""
获取Redis客户端如果连接断开则自动重连带重试机制
:return: Redis客户端实例
"""
if self._client is None:
# First connection: retry indefinitely until it succeeds
logger.info("初始化Redis连接...")
self._client = self._connect_with_retry(max_retries=None)
else:
# Check whether the connection is still valid; if it has dropped, reconnect (with retries)
try:
self._client.ping()
except (redis.ConnectionError, redis.TimeoutError, AttributeError) as e:
# Connection dropped: recreate it (retry indefinitely until success)
logger.warning(f"Redis连接已断开: {str(e)},开始重新连接...")
self._client = None
self._client = self._connect_with_retry(max_retries=None)
return self._client
# def redisClient():
# """获取Redis客户端单例"""
# return RedisClientSingleton().get_client()
def redisClientIntranet():
"""获取内网Redis客户端单例"""
return RedisClientSingleton().get_client()