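"""Render preview images of textured OBJ models with Blender (bpy).

Overall flow, as implemented below: import an .obj and its .jpg texture,
normalize the model's position and scale, light the scene with an HDRI
environment texture, place one or more cameras on a ring around the model,
render PNGs, and use check_image_rander() to verify that the render contains
enough distinct colors.
"""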
import math, os, time, sys, bpy, argparse
from tqdm import tqdm
from contextlib import contextmanager
from mathutils import Vector
from PIL import Image
import numpy as np


@contextmanager
def stdout_redirected(to=os.devnull):
    '''
    import os

    with stdout_redirected(to=filename):
        print("from Python")
        os.system("echo non-Python applications are also supported")
    '''
    fd = sys.stdout.fileno()

    ##### assert that Python and C stdio write using the same file descriptor
    ####assert libc.fileno(ctypes.c_void_p.in_dll(libc, "stdout")) == fd == 1

    def _redirect_stdout(to):
        sys.stdout.close()  # + implicit flush()
        os.dup2(to.fileno(), fd)  # fd writes to 'to' file
        sys.stdout = os.fdopen(fd, 'w')  # Python writes to fd

    with os.fdopen(os.dup(fd), 'w') as old_stdout:
        with open(to, 'w') as file:
            _redirect_stdout(to=file)
        try:
            yield  # allow code to be run with the redirected stdout
        finally:
            _redirect_stdout(to=old_stdout)  # restore stdout.
                                             # buffering and flags such as
                                             # CLOEXEC may be different


def check_image_rander(png_image_dir):
    """Check whether the image was rendered properly."""
    image_list = os.listdir(png_image_dir)
    if len(image_list) > 0:
        image_path = os.path.join(png_image_dir, image_list[0])
        img = Image.open(image_path)

        # Convert the image to RGB mode
        img = img.convert("RGB")

        # Convert the image data into a numpy array
        img_data = np.array(img)

        # Number of color bins
        color_bins = 24

        # Split the colors into 24 bins: map each RGB channel value to its bin
        def get_color_bin(value, bins):
            return (value * bins) // 256

        # Set used to collect all distinct (binned) colors
        color_set = set()

        # Walk over every pixel of the image
        for row in img_data:
            for pixel in row:
                # Bin each RGB channel value
                r, g, b = pixel
                r_bin = get_color_bin(r, color_bins)
                g_bin = get_color_bin(g, color_bins)
                b_bin = get_color_bin(b, color_bins)

                # Combine the binned RGB values into a single tuple
                color_set.add((r_bin, g_bin, b_bin))

        # Report the number of distinct colors
        print(f"The image contains {len(color_set)} distinct colors.")
        if len(color_set) > 20:
            return True
        else:
            return False
    else:
        return False


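# The nested pixel loop above can be slow for large renders. A minimal
# vectorised sketch of the same color-bin heuristic, kept only as a
# commented-out reference (count_binned_colors is not used by this script):
#
# def count_binned_colors(img_data, bins=24):
#     binned = (img_data.astype(np.int64) * bins) // 256  # bin each RGB channel
#     flat = binned.reshape(-1, 3)                         # one row per pixel
#     return len(np.unique(flat, axis=0))                  # distinct binned colors

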
def rander_image_and_check(args):
    """Render images for the model and check the result."""
    # List the obj files
    pid = args.pid
    pid_file_dir = os.path.join(args.input)
    if not os.path.exists(pid_file_dir):
        print(f"{pid_file_dir} does not exist")
        return
    obj_file_list = [aa for aa in os.listdir(pid_file_dir) if aa.endswith(".obj")]
    if len(obj_file_list) == 0:
        print("No obj file", pid)
        return
    texture_path_list = [aa for aa in os.listdir(pid_file_dir) if aa.endswith(".jpg")]
    if len(texture_path_list) == 0:
        print("No texture file", pid)
        return
    texture_file = texture_path_list[0]
    sorted_obj_file_list = sorted(obj_file_list, key=len)
    print(sorted_obj_file_list)
    is_rander = False
    # Render images for each obj in the list
    for obj_file in sorted_obj_file_list:
        start = time.time()
        # Initialize the Blender environment
        bpy.ops.wm.read_homefile()
        bpy.ops.object.delete(use_global=False, confirm=False)
        bpy.context.scene.unit_settings.scale_length = 1
        bpy.context.scene.unit_settings.length_unit = 'CENTIMETERS'
        bpy.context.scene.unit_settings.mass_unit = 'GRAMS'
        # bpy.context.scene.render.engine = 'CYCLES'
        bpy.context.scene.render.engine = 'BLENDER_EEVEE'  # ('BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'CYCLES')
        bpy.context.scene.cycles.feature_set = 'SUPPORTED'
        bpy.context.scene.cycles.device = 'GPU'
        bpy.context.scene.cycles.preview_samples = args.sample
        bpy.context.scene.cycles.samples = args.sample
        bpy.context.scene.render.film_transparent = True
        bpy.context.scene.cycles.use_progressive_refining = True  # enable progressive refine rendering

        # Import the model file
        print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())}: import pid: {pid}')
        start_import = time.time()
        print("===============", os.path.join(args.input, obj_file))
        with stdout_redirected(to=os.devnull):
            bpy.ops.import_scene.obj(filepath=os.path.join(args.input, obj_file))
        print(
            f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())}: import pid: {pid} done in {time.time() - start_import:.2f}s')
        texture_path = os.path.join(args.input, texture_file)
        materials = bpy.data.materials
        for material in materials:
            # Make sure the material uses a node tree
            if not material.use_nodes:
                material.use_nodes = True
            node_tree = material.node_tree
            texture_node = node_tree.nodes.new(type='ShaderNodeTexImage')
            texture_node.image = bpy.data.images.load(texture_path)
            bsdf_node = node_tree.nodes.get('Principled BSDF')
            if bsdf_node:
                node_tree.links.new(bsdf_node.inputs['Base Color'], texture_node.outputs['Color'])
        # Get the imported model object
        imported_object = bpy.context.selected_objects[0]  # assumes only one model is imported
        # Get the model's bounding box
        bbox_corners = [imported_object.matrix_world @ Vector(corner) for corner in imported_object.bound_box]
        min_z = min(corner.z for corner in bbox_corners)
        bpy.ops.object.select_all(action='DESELECT')
        imported_object.select_set(True)
        bpy.context.view_layer.objects.active = imported_object

        # Move object down so lowest point is at origin
        imported_object.location.z -= min_z

        # Apply transformations
        bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
        bbox_corners = [imported_object.matrix_world @ Vector(corner) for corner in imported_object.bound_box]
        # Get the highest Z value of the model
        model_height = max(corner.z for corner in bbox_corners)

        print(f"The highest Z value of the model is: {model_height}")

        # Get the camera height (camera parameters are assumed to be set in args)
        camera_position = args.cameras['1']['position']

        # Compute the scale factor from the camera height and the model height
        # model_height = 25  # placeholder value; in practice it is taken from the model size above
        scale_factor = camera_position / model_height
        bpy.context.view_layer.objects.active = imported_object
        imported_object.select_set(True)
        bpy.ops.object.align(align_mode='OPT_1', relative_to='OPT_1', align_axis={'Z'})
        bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
        # Scale the model
        imported_object.scale = (scale_factor, scale_factor, scale_factor)
        bpy.context.object.rotation_euler = (0, 0, 0)
        bpy.ops.object.origin_set(type='ORIGIN_CENTER_OF_VOLUME', center='MEDIAN')
        bpy.context.object.location[0] = 0  # keep the model at a fixed position so human3d can recognize it correctly
        bpy.context.object.location[1] = 0
        bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)

        # Set up the environment lighting
        bpy.data.scenes['Scene'].world.use_nodes = True
        enode = bpy.data.scenes['Scene'].world.node_tree.nodes.new("ShaderNodeTexEnvironment")
        enode.image = bpy.data.images.load(args.Env_Texture_path)
        bpy.data.scenes['Scene'].world.node_tree.links.new(enode.outputs['Color'],
                                                           bpy.data.scenes['Scene'].world.node_tree.nodes[
                                                               'Background'].inputs['Color'])
        # Initialize the camera
        camera_lens = 9  # camera focal length; a shorter focal length gives a wider field of view
        camera_sensor_width = 25.4  # camera sensor width; 1 inch = 25.4 mm
        camera_angle = 2 * math.pi / args.ps_column  # central angle per camera (radians)
        # Initial camera parameters
        bpy.ops.object.select_all(action='DESELECT')
        obj_camera = bpy.data.objects['Camera']
        bpy.context.view_layer.objects.active = obj_camera
        obj_camera.select_set(True)
        bpy.context.object.data.type = args.len_type
        bpy.context.object.data.sensor_width = camera_sensor_width
        bpy.context.scene.render.resolution_x = args.resolution_x
        bpy.context.scene.render.resolution_y = args.resolution_y
        obj_camera.name = 'camera_init'

        # Compute the location and rotation of every camera, then render
        for column in tqdm(range(args.ps_column)):
            theta = (column + 1) * camera_angle
            x = args.ps_radius * math.sin(theta)
            y = args.ps_radius * math.cos(theta)

            for camera_id, camera_info in args.cameras.items():
                x_ble = x
                y_ble = y
                z_ble = camera_info['position']

                camera_copy_name = obj_camera.copy()
                camera_copy_name.data = obj_camera.data.copy()
                bpy.context.collection.objects.link(camera_copy_name)
                bpy.ops.object.select_all(action='DESELECT')
                bpy.context.view_layer.objects.active = camera_copy_name
                camera_copy_name.select_set(True)
                camera_copy_name.name = f"camera_{column + 1}_{camera_id}"

                if args.len_type == 'PERSP':
                    bpy.context.object.data.lens = camera_info['PERSP_lens']
                elif args.len_type == 'ORTHO':
                    bpy.context.object.data.ortho_scale = camera_info['ORTHO_scale']

                camera_copy_name.location = (x_ble, -y_ble, z_ble)
                camera_copy_name.rotation_euler = (camera_info['rotation'], 0, theta)

                # Render the image
                bpy.context.scene.camera = bpy.data.objects[camera_copy_name.name]  # switch the active camera
                os.makedirs(os.path.join(args.output, args.len_type,
                                         f'{pid}_{args.resolution_x}x{args.resolution_y}_{args.ps_column}', camera_id),
                            exist_ok=True)
                png_name = f"{pid}_pic.png"
                # Write into the per-camera directory created above so check_image_rander below can find the file
                png_out_path = os.path.join(args.output, args.len_type,
                                            f'{pid}_{args.resolution_x}x{args.resolution_y}_{args.ps_column}',
                                            camera_id, png_name)
                print(f"Image output path: {png_out_path}")
                bpy.context.scene.render.filepath = png_out_path
                bpy.context.scene.render.image_settings.file_format = 'PNG'

                with stdout_redirected():
                    bpy.ops.render.render(write_still=True)

        # delete camera_init
        bpy.data.objects.remove(bpy.context.scene.objects['camera_init'])
        bpy.data.objects.remove(bpy.context.scene.objects['Light'])
        bpy.ops.wm.quit_blender()
        print(f'{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())}: render pid: {pid} done, time: {time.time() - start:.2f}s')
        png_image_dir = os.path.join(args.output, args.len_type,
                                     f'{pid}_{args.resolution_x}x{args.resolution_y}_{args.ps_column}', "1")
        time.sleep(2)
        if os.path.exists(png_image_dir):
            if check_image_rander(png_image_dir):
                break


def get_args():
    argparser = argparse.ArgumentParser(description='Render the camera in the studio')
    argparser.add_argument('-pid', '--pid', type=str, default='236479', help='pid of the model')
    argparser.add_argument('-i', '--input', type=str, default='/data/datasets_20t/obj_rander/',
                           help='input models path')
    argparser.add_argument('-o', '--output', type=str, default='/data/datasets_20t/render_images/',
                           help='output images path')
    argparser.add_argument('-l', '--limit', type=int, default=0, help='render limit')
    argparser.add_argument('-ps_radius', '--ps_radius', type=float, default=18.0, help='photo studio radius, unit: m')
    argparser.add_argument('-ps_column', '--ps_column', type=int, default=1, help='photo studio column number')
    argparser.add_argument('-t', '--len_type', type=str, default='ORTHO', help='camera lens type, PERSP or ORTHO')
    argparser.add_argument('-r_x', '--resolution_x', type=int, default=768, help='render resolution_x')
    argparser.add_argument('-r_y', '--resolution_y', type=int, default=1024, help='render resolution_y')
    argparser.add_argument('-s', '--sample', type=int, default=512, help='render samples')
    argparser.add_argument('-e', '--Env_Texture_path', type=str,
                           default='D://make2/tools/pic_for_obj/hdrs/studio_small_08_2k.exr',
                           help='environment texture path')
    args = argparser.parse_args()

    # Camera parameters; assumes a human body about 2 m tall, with coverage for multi-person shots
    args.cameras = {
        '1': {
            'position': 2.5,  # height of camera 1 (meters)
            'rotation': math.radians(86),
            'PERSP_lens': 8,
            'ORTHO_scale': 3.0,
        },
        # '2': {
        #     'position': 1.8,  # height of camera 2 (1.8 m)
        #     'rotation': math.radians(58),
        #     'PERSP_lens': 13,
        #     'ORTHO_scale': 2.5,
        # }
    }
    return args


def rander_image_run(args):
    """Main workflow."""
    if not os.path.exists(args.output):
        os.makedirs(args.output)
    try:
        rander_image_and_check(args)
    except Exception as e:
        print("Image rendering error", args.pid, e)


if __name__ == '__main__':
    args = get_args()
    print(args)
    rander_image_run(args)
# python image_rander_small.py -pid 234161 -i /data/datasets_20t/obj_rander/ -o /data/datasets_20t/render_images/
# python image_rander_small.py -pid 5924 -i /data/datasets_20t/obj_rander/ -o /data/datasets_20t/render_images/
"""
argparser.add_argument('-e', '--Env_Texture_path', type=str, default='/data/datasets/hdrs/studio_small_08_2k.exr',
Replace the default with your own HDRI path. bpy 3.4.0.
"""
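# A further (hypothetical) invocation assuming the same input/output layout, using the
# perspective lens type and a lower sample count via the -t and -s flags defined above:
# python image_rander_small.py -pid 236479 -t PERSP -s 128 -i /data/datasets_20t/obj_rander/ -o /data/datasets_20t/render_images/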