import os, sys, time, bpy, math, requests, bmesh, json, shutil

from PIL import Image

import secrets
import string
import platform

if platform.system() == 'Windows':
    sys.path.append('e:\\libs\\')
    #sys.path.append('libs')
else:
    sys.path.append('/data/deploy/make3d/make2/libs/')

import config, libs, libs_db, main_service_db, common, foot_mark_seam, libs_db_gpu


def bmesh_copy_from_object(obj, transform=True, triangulate=True, apply_modifiers=False):
    """Returns a transformed, triangulated copy of the mesh"""
    assert obj.type == 'MESH'

    if apply_modifiers and obj.modifiers:
        depsgraph = bpy.context.evaluated_depsgraph_get()
        obj_eval = obj.evaluated_get(depsgraph)
        me = obj_eval.to_mesh()
        bm = bmesh.new()
        bm.from_mesh(me)
        obj_eval.to_mesh_clear()
    else:
        me = obj.data
        if obj.mode == 'EDIT':
            bm_orig = bmesh.from_edit_mesh(me)
            bm = bm_orig.copy()
        else:
            bm = bmesh.new()
            bm.from_mesh(me)

    if transform:
        matrix = obj.matrix_world.copy()
        if not matrix.is_identity:
            bm.transform(matrix)
            # Clear the translation so the second identity check only triggers
            # a normal update when rotation/scale actually changed.
            matrix.translation.zero()
            if not matrix.is_identity:
                bm.normal_update()

    if triangulate:
        bmesh.ops.triangulate(bm, faces=bm.faces)

    return bm


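# Usage sketch (not called by the pipeline): how bmesh_copy_from_object() is
# typically consumed for measurements, mirroring the volume/weight step in
# export_and_update_glbs(). The density constant reuses this project's 1.226;
# the helper name is illustrative only.
def _example_model_volume_and_weight(obj, density=1.226):
    bm = bmesh_copy_from_object(obj)
    volume = bm.calc_volume()
    # Free the BMesh explicitly so repeated calls do not accumulate copies.
    bm.free()
    return round(volume, 2), round(volume * density, 2)

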
def find_pid_objname(pid):
    # Returns the name of the first object whose name starts with the pid,
    # or None if no such object exists.
    for obj in bpy.data.objects:
        if obj.name.startswith(str(pid)):
            return obj.name


def reload_obj(pid):
    obj_filename = os.path.join(config.workdir, pid, 'output', f'{pid}.obj')
    bpy.ops.wm.read_homefile()
    bpy.ops.object.delete(use_global=False, confirm=False)
    bpy.ops.import_scene.obj(filepath=obj_filename)
    bpy.context.scene.unit_settings.scale_length = 1
    bpy.context.scene.unit_settings.length_unit = 'CENTIMETERS'
    bpy.context.scene.unit_settings.mass_unit = 'GRAMS'

    obj = bpy.context.selected_objects[0]
    bpy.context.view_layer.objects.active = obj
    obj.select_set(True)


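# Compatibility sketch (assumption, not part of the original flow): newer
# Blender releases drop the legacy bpy.ops.import_scene.obj operator in favour
# of bpy.ops.wm.obj_import. If this script ever runs on such a build, a
# fallback like the helper below keeps reload_obj() workable; verify against
# the Blender version actually deployed before relying on it.
def _import_obj_compat(filepath):
    try:
        bpy.ops.import_scene.obj(filepath=filepath)
    except AttributeError:
        bpy.ops.wm.obj_import(filepath=filepath)

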
def base_fix(pid):
    print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} pid: {pid} starting basic model correction...')
    start_time = time.time()

    # Normalize the texture/MTL filename convention
    def fix_filename(pid):
        texture_filename_end = ""
        if os.path.exists(os.path.join(config.workdir, pid, 'output', f'{pid}.jpg')):
            print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} pid: {pid} filenames already follow the latest convention, nothing to do')
            return
        elif os.path.exists(os.path.join(config.workdir, pid, 'output', f'{pid}_u0_v0_diffuse.jpg')):
            texture_filename_end = '_u0_v0_diffuse.jpg'
        elif os.path.exists(os.path.join(config.workdir, pid, 'output', f'{pid}_u1_v1.jpg')):
            texture_filename_end = '_u1_v1.jpg'
        else:
            # No recognized texture naming pattern: renaming would fail, so bail out early.
            print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} pid: {pid} no known texture file found, skipping filename fix')
            return
        os.rename(os.path.join(config.workdir, pid, 'output', f'{pid}{texture_filename_end}'), os.path.join(config.workdir, pid, 'output', f'{pid}.jpg'))
        with open(os.path.join(config.workdir, pid, 'output', f'{pid}.mtl'), 'r') as f:
            lines = f.readlines()
        lines = [line.replace(texture_filename_end, '.jpg') for line in lines]
        with open(os.path.join(config.workdir, pid, 'output', f'{pid}.mtl'), 'w') as f:
            f.writelines(lines)

    fix_filename(pid)

    # Reset to a clean Blender scene
    reload_obj(pid)

    # Normalize model orientation, position and size
    pid_objname = find_pid_objname(pid)
    bpy.data.objects[pid_objname].rotation_euler = (0, 0, 0)
    bpy.ops.object.origin_set(type='ORIGIN_CENTER_OF_VOLUME', center='MEDIAN')
    bpy.context.object.location[0] = 0
    bpy.context.object.location[1] = 0
    bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)

    # Process the sole (foot bottom) UVs
    # basepath = os.path.join(config.workdir, pid, 'output')
    # foot_mark_seam.get_obj_max_foot(basepath, os.path.join(basepath, f'{pid}.obj'), os.path.join(basepath, f'{pid}.jpg'))

    print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} pid: {pid} basic model correction finished, took {libs.diff_time(start_time)}')


def export_and_update_obj(pid):
    print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} pid: {pid} starting OBJ export and upload...')
    start_time = time.time()
    obj_filename = os.path.join(config.workdir, pid, 'output', f'{pid}.obj')
    bpy.ops.export_scene.obj(filepath=obj_filename)

    # Upload to OSS
    config.oss_bucket.put_object_from_file(f'objs/auto/{pid}/{pid}.obj', os.path.join(config.workdir, pid, 'output', f'{pid}.obj'))
    config.oss_bucket.put_object_from_file(f'objs/auto/{pid}/{pid}.mtl', os.path.join(config.workdir, pid, 'output', f'{pid}.mtl'))
    config.oss_bucket.put_object_from_file(f'objs/auto/{pid}/{pid}.jpg', os.path.join(config.workdir, pid, 'output', f'{pid}.jpg'))
    config.oss_bucket.put_object_from_file(f'objs/auto/{pid}/{pid}.obj.rcInfo', os.path.join(config.workdir, pid, 'output', f'{pid}.obj.rcInfo'))

    print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} pid: {pid} OBJ export and upload finished, took {libs.diff_time(start_time)}')


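# Refactor sketch (assumption: same config.oss_bucket.put_object_from_file API
# as used above). Uploading the four output files in a loop keeps the key/path
# pairs in one place and skips any file that is missing instead of raising.
# Illustrative only; not wired into the pipeline.
def _upload_outputs(pid, suffixes=('.obj', '.mtl', '.jpg', '.obj.rcInfo')):
    output_dir = os.path.join(config.workdir, pid, 'output')
    for suffix in suffixes:
        local_path = os.path.join(output_dir, f'{pid}{suffix}')
        if os.path.exists(local_path):
            config.oss_bucket.put_object_from_file(f'objs/auto/{pid}/{pid}{suffix}', local_path)

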
def resize_texture_and_reload_obj(pid, ratio=0.5):
    print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} pid: {pid} starting texture downscaling and model reload...')
    start_time = time.time()
    bpy.ops.wm.quit_blender()

    image_name = os.path.join(config.workdir, pid, 'output', f'{pid}.jpg')
    img = Image.open(image_name)
    w, h = img.size
    img = img.resize((int(w * ratio), int(h * ratio)))
    img.save(image_name, optimize=True, quality=95)

    reload_obj(pid)
    print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} pid: {pid} texture downscaling and model reload finished, took {libs.diff_time(start_time)}')


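# Quality note (sketch, not a behaviour change to the function above):
# Image.resize() picks a default resampling filter that has varied across
# Pillow versions, so passing an explicit high-quality filter such as
# Image.LANCZOS is the predictable choice when halving a texture.
def _resize_texture(image_path, ratio=0.5):
    img = Image.open(image_path)
    w, h = img.size
    img = img.resize((int(w * ratio), int(h * ratio)), resample=Image.LANCZOS)
    img.save(image_path, optimize=True, quality=95)

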
def export_and_update_glbs(pid):
    print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} pid: {pid} starting export and upload of the review-model and 3D-album GLB files...')
    start_time = time.time()
    headcount = libs.getHeadCount(pid)
    pid_objname = find_pid_objname(pid)
    obj = bpy.data.objects[pid_objname]
    obj.select_set(True)

    model_info = {}
    model_info['headcount'] = headcount
    model_info['faces'] = round(len(obj.data.polygons) / 10000)  # face count in units of 10,000
    model_info['height'] = round(obj.dimensions.y * 100)

    # bpy.ops.wm.save_as_mainfile(filepath=os.path.join(config.workdir, pid, f'{pid}.blend'))

    # Uniformly scale to the standard 9 cm size
    scale = 90 / bpy.data.objects[pid_objname].dimensions.y
    bpy.data.objects[pid_objname].scale = (scale, scale, scale)
    bpy.ops.object.transform_apply(scale=True)

    # bpy.ops.wm.save_as_mainfile(filepath=os.path.join(config.workdir, pid, f'{pid}-9cm.blend'))

    bm = bmesh_copy_from_object(obj)
    model_info['volume'] = round(bm.calc_volume(), 2)
    model_info['weight'] = round(model_info['volume'] * 1.226, 2)
    print(f'model info for {pid}: {model_info}')

    res = requests.get(f'{config.urls["upload_model_info_url"]}?pid={pid}&headcount={headcount}&faces={model_info["faces"]}&volume={model_info["volume"]}&weight={model_info["weight"]}&height={model_info["height"]}')
    print('uploaded model info:', res.text)
    # with open(os.path.join(config.sharedir, 'model_info', f'{pid}.json'), 'w') as f:
    #     json.dump(model_info, f)
    #     f.close()

    # First generate the review model
    faces_dest = 500000 * headcount
    # Decimate
    faces_current = len(bpy.data.objects[pid_objname].data.polygons)
    print(f'current faces: {faces_current}, target faces: {faces_dest}')

    bpy.ops.object.modifier_add(type='DECIMATE')
    bpy.context.object.modifiers["Decimate"].ratio = faces_dest / faces_current
    bpy.ops.object.modifier_apply(modifier="Decimate")

    glb_filename = os.path.join(config.workdir, pid, 'output', f'{pid}.glb')
    bpy.ops.export_scene.gltf(filepath=glb_filename, export_format='GLB', export_apply=True, export_jpeg_quality=75, export_draco_mesh_compression_enable=False)

    # Then generate the digital (3D-album) model
    faces_dest = 120000 * headcount

    # Decimate
    faces_current = len(bpy.data.objects[pid_objname].data.polygons)
    print(f'current faces: {faces_current}, target faces: {faces_dest}')

    bpy.ops.object.modifier_add(type='DECIMATE')
    bpy.context.object.modifiers["Decimate"].ratio = faces_dest / faces_current
    bpy.ops.object.modifier_apply(modifier="Decimate")

    glb_filename = os.path.join(config.workdir, pid, 'output', f'{pid}-3d.glb')
    bpy.ops.export_scene.gltf(filepath=glb_filename, export_format='GLB', export_apply=True, export_jpeg_quality=75, export_draco_mesh_compression_enable=False)

    os.system(f'gltfpack -c -i {os.path.join(config.workdir, pid, "output", f"{pid}.glb")} -o {os.path.join(config.workdir, pid, "output", f"{pid}-pack.glb")}')
    config.oss_bucket.put_object_from_file(f'glbs/auto/{pid}.glb', os.path.join(config.workdir, pid, 'output', f'{pid}-pack.glb'))
    config.oss_bucket.put_object_from_file(f'glbs/3d/{pid}.glb', os.path.join(config.workdir, pid, 'output', f'{pid}-3d.glb'))

    print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} pid: {pid} GLB export and upload finished, took {libs.diff_time(start_time)}')


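# Refactor sketch: the two "decimate to a face budget" blocks above are
# identical apart from the target count, so they could share a helper like
# this one. Illustrative only; it is not called anywhere in this script.
def _decimate_to(obj_name, faces_dest):
    faces_current = len(bpy.data.objects[obj_name].data.polygons)
    print(f'current faces: {faces_current}, target faces: {faces_dest}')
    bpy.ops.object.modifier_add(type='DECIMATE')
    # A ratio above 1.0 is clamped by the modifier and leaves the mesh unchanged.
    bpy.context.object.modifiers["Decimate"].ratio = min(1.0, faces_dest / faces_current)
    bpy.ops.object.modifier_apply(modifier="Decimate")

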
def step3(pid, task_distributed_id=""):
    print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} pid: {pid} starting model post-processing')
    start_time = time.time()
    # Basic correction of orientation, size, position, etc.
    base_fix(pid)
    # Remove grey cast

    # Purify whites while avoiding the face region

    # TODO: deepen facial feature details
    # TODO: automatic UV segmentation, priority order: soles, face, fingers, inner arms, inner thighs, front torso, back, others
    # TODO: automatically repair the texture based on the UV segmentation
    # Use Blender to generate the 3D-album GLB and the review GLB, and compress the texture

    export_and_update_obj(pid)
    resize_texture_and_reload_obj(pid)
    export_and_update_glbs(pid)

    # Add a record to the GPU server database for AI auto-repair (currently face repair).
    # Do not upload or update the modelling status here, to keep the task out of the
    # manual repair department; originally only single-person models (headcount == 1)
    # were processed.
    headCount = libs.getHeadCount(pid)
    #if headCount == 1:
    libs_db_gpu.add_task(data={"pid": pid, "cut_body": "face", "heads": headCount})

    # Update the local task status and the cloud task status
    print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} pid: {pid} model post-processing finished, took {libs.diff_time(start_time)}')
    res = requests.post(config.urls['update_status_modelsuccess_url'], data={'id': pid})
    print('updated modelling-success status:', res.text)
    #shutil.rmtree(os.path.join(config.workdir, pid), ignore_errors=True)
    libs_db.finish_task({"task_type": "make", "task_key": pid})
    if task_distributed_id:
        # Because step2 and step3 run together, updating finished_at in the detail table is skipped for now
        #main_service_db.update_task_distributed_detail({"task_distributed_id": task_distributed_id, "finished_at": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())})

        # Update status and finished_at in the main table
        main_service_db.update_task_distributed({"id": task_distributed_id, "status": 2, "finished_at": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())})
        print("step3 finished")
        #return

    # Generate the OBJ full-body thumbnail
    print('running the full-body OBJ thumbnail script')
    os.system(f'python d:\\make2\\tools\\pic_for_obj\\image_rander_small.py -pid {pid} -i D://{pid}/output -o D://{pid}/output')
    # If the thumbnail exists, upload it to OSS and update the database
    if os.path.exists(f'D://{pid}/output/{pid}_pic.png'):
        # Fetch the photo-order info and read its uuid
        uuid = get_p3d_info(pid)["data"]["uuid"]
        print("uuid", uuid)
        #config.oss_bucket.put_object_from_file(f'objs/auto/{pid}/{pid}_pic.png', f'D://{pid}/output/{pid}_pic.png')
        # Update the database
        # main_service_db.update_task_distributed({"id": task_distributed_id, "pic_url": f'{pid}_pic.png'})

    # Remove the working folder
    common.removeFolder(str(pid))

    os.system(f"python D:/make2/tools/get_weight_by_pid.py {pid}")
    # Run model optimization; called asynchronously, executed on the ai_repair server
    os.system(f"python D://make2/tools/optimize_model/main.py {pid}")


def get_p3d_info(pid):
    url = "https://mp.api.suwa3d.com/api/customerP3dLog/info?id=" + pid
    res = requests.get(url)
    res = res.json()
    print("photo-order info:", res)
    return res


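# Hardening sketch (assumption, not the current behaviour): the call above has
# no timeout and assumes a JSON body, so a slow or failing API stalls or
# crashes step3. A guarded variant could look like this.
def _get_p3d_info_safe(pid, timeout=10):
    url = "https://mp.api.suwa3d.com/api/customerP3dLog/info?id=" + str(pid)
    res = requests.get(url, timeout=timeout)
    res.raise_for_status()
    return res.json()

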
def main(pid):
    if pid == '0':
        while True:
            # Pull tasks from the local MySQL queue and run the step-3 post-processing
            step3(pid)
    else:
        step3(pid)


if __name__ == '__main__':
    # Pull tasks from the local MySQL queue and run the step-3 post-processing.
    # By default, run as a polling daemon; pass one or more comma-separated pids
    # as an argument to run single tasks for easier debugging.
    pid = '0'
    if len(sys.argv) > 1:
        pids = sys.argv[1].split(',')
        for pid in pids:
            main(pid)
        exit()
    main(pid)