dongchangxi 7 months ago
commit e0cf06eac5
  1. apps/auto_convert3d_new.py (6)
  2. apps/fix_up_color_two.py (242)
  3. apps/ps_image_shadow_up_ag.py (192)
  4. apps/white_purification_v1.py (139)

apps/auto_convert3d_new.py (6)

@@ -3,7 +3,7 @@ import os, oss2, time, redis, requests, shutil, sys, subprocess, json, platform
 from PIL import Image, ImageDraw, ImageFont
 from retrying import retry
 import atexit,platform
-import white_purification
+import white_purification_v1
 if platform.system() == 'Windows':
     sys.path.append('e:\\libs\\')
 import common
@@ -127,7 +127,9 @@ def team_check(r):
     # white-purify the texture file
     imagePath = os.path.join(workdir, 'print', pid, pid + "Tex1.jpg")
     print("starting white purification of the texture")
-    white_purification.white_purification_utils(imagePath)
+    #white_purification_v1.white_purification_utils(imagePath)
+    os.system(f'python d:\\make2\\apps\\white_purification_v1.py -i {imagePath}')
     print("texture white purification finished", imagePath)
     # re-upload the purified texture once purification is done
     ossImagePath = os.path.join("objs/print", pid, ossPath, "texture", "process_" + pid + "Tex1.jpg")
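As a side note, a subprocess-based call is easier to diagnose than os.system when the child script fails. This is only a sketch for comparison, assuming the same d:\make2\apps location used above, and is not what the commit does:

    import subprocess, sys
    # run the purification script with the current interpreter and raise if it exits non-zero
    subprocess.run([sys.executable, r'd:\make2\apps\white_purification_v1.py', '-i', imagePath], check=True)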

apps/fix_up_color_two.py (242)

@@ -0,0 +1,242 @@
import cv2, numpy as np, matplotlib.pyplot as plt, os, shutil, argparse, random, time, math
from tqdm import tqdm
def ps_color_scale_adjustment(image, shadow=0, highlight=255, midtones=1):
    '''
    Emulate Photoshop's Levels adjustment, with 0 <= shadow < highlight <= 255.
    :param image: input image
    :param shadow: black point (0 to highlight)
    :param highlight: white point (shadow to 255)
    :param midtones: midtone / gamma value (0.01 to 9.99)
    :return: adjusted image
    '''
    if highlight > 255:
        highlight = 255
    if shadow < 0:
        shadow = 0
    if shadow >= highlight:
        shadow = highlight - 2
    if midtones > 9.99:
        midtones = 9.99
    if midtones < 0.01:
        midtones = 0.01
    image = np.array(image, dtype=np.float16)
    # spread between the white point and the black point
    Diff = highlight - shadow
    image = image - shadow
    image[image < 0] = 0
    image = (image / Diff) ** (1 / midtones) * 255
    image[image > 255] = 255
    image = np.array(image, dtype=np.uint8)
    return image
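# Illustrative check of the mapping above (not in the original file): with shadow=20, highlight=240
# and midtones=1 the transform is linear, so an input value of 130 maps to
# (130 - 20) / 220 * 255 = 127.5 -> 127 after the uint8 cast, while anything <= 20 clips to 0 and
# anything >= 240 clips to 255. A midtones value above 1 additionally lifts the midrange,
# e.g. midtones=2 maps 130 to (110 / 220) ** 0.5 * 255, about 180.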
# def show_histogram(image, image_id, save_hist_dir, min_threshold, max_threshold):
#     '''
#     Plot the histogram with both thresholds annotated and save it.
#     :param image: input image
#     :param image_id: image id
#     :param save_hist_dir: directory to save the plot to
#     :param min_threshold: lower threshold
#     :param max_threshold: upper threshold
#     :return: the original image and image_change, the image clipped to the histogram thresholds
#     '''
#     plt.rcParams['font.family'] = 'SimHei'
#     plt.rcParams['axes.unicode_minus'] = False
#     plt.hist(image.ravel(), 254, range=(2, 256), density=False)
#     plt.hist(image.ravel(), 96, range=(2, 50), density=False)  # zoom into range(0, 50); roughly twice as many bins as the range keeps the bars sparse and easier to compare
#     plt.hist(image.ravel(), 110, range=(200, 255), density=False)  # zoom into range(225, 255)
#     plt.annotate('thresh1=' + str(min_threshold),  # label text
#                  xy=(min_threshold, 0),  # arrow target (the chosen threshold)
#                  xytext=(min_threshold, 500000),  # label position
#                  arrowprops=dict(facecolor='black', width=1, shrink=5, headwidth=2))  # arrow style
#     plt.annotate('thresh2=' + str(max_threshold),  # label text
#                  xy=(max_threshold, 0),  # arrow target (the chosen threshold)
#                  xytext=(max_threshold, 500000),  # label position
#                  arrowprops=dict(facecolor='black', width=1, shrink=5, headwidth=2))  # arrow style
#     # draw a horizontal reference line
#     # plt.axhline(y=10000, color='r', linestyle='--', linewidth=0.5)
#     plt.title(str(image_id))
#     # plt.show()
#     # save the histogram
#     save_hist_name = os.path.join(save_hist_dir, f'{image_id}_{min_threshold}&{max_threshold}.jpg')
#     plt.savefig(save_hist_name)
#     # clear the figure so successive plots do not overlap
#     plt.clf()
# def low_find_histogram_range(image, target_frequency):
#     '''
#     Iteratively search for the histogram bin (x) whose frequency reaches the target_frequency (y) limit.
#     :param image: input image
#     :param target_frequency: frequency (y) limit for the histogram
#     :return: the histogram bin x and its frequency y
#     '''
#     # compute the grayscale histogram
#     hist, bins = np.histogram(image, bins=256, range=[0, 256])
#     # initialise the bin and frequency
#     interval = 2
#     frequency = hist[255]
#     while frequency < target_frequency:
#         # advance the bin and update the frequency
#         interval += 1
#         # if the histogram frequency is None, treat it as 0 to avoid comparing None with an int
#         frequency = hist[interval] if hist[interval] is not None else 0
#         frequency += hist[interval] if hist[interval] is not None else 0
#         # stop the loop once the frequency gets close to 10000
#         if target_frequency - 2000 <= frequency <= target_frequency + 1000:
#             break
#
#     return interval, frequency
# def high_find_histogram_range(image, target_frequency):
#     '''
#     Iteratively search for the histogram bin (x) whose frequency reaches the target_frequency (y) limit.
#     :param image: input image
#     :param target_frequency: frequency (y) limit for the histogram
#     :return: the histogram bin x and its frequency y
#     '''
#     # compute the grayscale histogram
#     hist, bins = np.histogram(image, bins=256, range=[0, 256])
#     # initialise the bin and frequency
#     interval = 255
#     frequency = hist[255]
#     while frequency < target_frequency:
#         # step the bin down and update the frequency
#         interval -= 1
#         # if the histogram frequency is None, treat it as 0 to avoid comparing None with an int
#         frequency = hist[interval] if hist[interval] is not None else 0
#         frequency += hist[interval] if hist[interval] is not None else 0
#         # stop the loop once the frequency gets close to 10000
#         if target_frequency - 2000 <= frequency <= target_frequency + 2000:
#             break
#
#     return interval, frequency
def find_last_x(image, slope_threshold=1000):
    x = []
    y = []
    hist, bins = np.histogram(image, bins=256, range=[0, 256])
    # find the highest peak in the low end of the histogram (searched over bins 5-24)
    max_y = 0
    max_i = 5
    for i in range(5, 25):
        if hist[i] > max_y:
            max_y = hist[i]
            max_i = i
    print(f'highest peak below 50: y={max_y}, x={max_i}')
    for i in range(2, max_i):
        x.append(i)
        y.append(hist[i])
    # absolute slope between neighbouring bins below the peak
    slopes = [abs(y[i + 1] - y[i]) for i in range(len(x) - 1)]
    current_interval = []
    max_interval = []
    max_x = {}
    for i, slope in enumerate(slopes):
        current_interval.append(slope)
        if slope >= slope_threshold:
            if len(current_interval) > len(max_interval):
                max_interval = current_interval.copy()
            max_x[x[i]] = slope
            current_interval = []
    print(max_x)
    # the last bin with a steep slope before the peak becomes the black point
    last_x = list(max_x)[-1]
    last_y = max_x[last_x]
    return last_x, last_y
def find_last_high(image, slope_threshold=2500):
    x = []
    y = []
    hist, bins = np.histogram(image, bins=255, range=[2, 255])
    # find the highest peak in the high end of the histogram (searched over bins 220-254)
    max_y = 0
    max_i = 254
    for i in range(220, 255):
        if hist[i] > max_y:
            max_y = hist[i]
            max_i = i
    print(f'highest peak above 200: y={max_y}, x={max_i}')
    for i in range(max_i, 255):
        x.append(i)
        y.append(hist[i])
    slopes = [abs(y[i + 1] - y[i]) for i in range(len(x) - 1)]
    current_interval = []
    max_interval = []
    max_x = {}
    find = False
    # walk the slopes from the bright end back towards the peak
    for i in range(len(slopes) - 1, -1, -1):
        slope = slopes[i]
        current_interval.append(slope)
        if slope >= slope_threshold:
            find = True
            if len(current_interval) > len(max_interval):
                max_interval = current_interval.copy()
            max_x[x[i]] = slope
            current_interval = []
    # if no steep slope was found above 200 and the histogram there is low (< 5000 at 220), clip at 220
    if not find and hist[220] < 5000:
        max_x[220] = hist[220]
    print(max_x)
    if len(max_x) > 0:
        last_x = list(max_x)[0]
        last_y = max_x[last_x]
        if last_x < 220:
            last_x = 220
            # last_y = max_x[last_x]
    else:
        print('no sufficiently flat region found above 200, falling back to 254 as the highlight')
        last_x = 254
        last_y = hist[254]
    return last_x, last_y
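# Usage note (not in the original file): remove_gray_and_sharpening below feeds the x values
# returned by find_last_x and find_last_high into ps_color_scale_adjustment as the shadow and
# highlight points, so these two heuristics effectively pick the black point and white point of
# the levels adjustment from the histogram slopes.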
def remove_gray_and_sharpening(jpg_path, save_hist_dir):
    input_image = cv2.imread(jpg_path)
    filename = os.path.basename(jpg_path)
    # low_x_thresh, low_y_frequency = low_find_histogram_range(input_image, low_y_limit)
    low_x_thresh, low_y_frequency = find_last_x(input_image)
    # high_x_thresh, high_y_frequency = high_find_histogram_range(input_image, high_y_limit)
    high_x_thresh, high_y_frequency = find_last_high(input_image)
    # print(f"{low_x_thresh} bin, {low_y_frequency} frequency")
    # print(f"{high_x_thresh} bin, {high_y_frequency} frequency")
    output_filename = os.path.join(save_hist_dir, filename).replace('.jpg', f'_{low_x_thresh}_{high_x_thresh}.jpg')
    high_output_image = ps_color_scale_adjustment(input_image, shadow=low_x_thresh, highlight=high_x_thresh, midtones=1)
    # high_output_image = ps_color_scale_adjustment(low_ouput_image, shadow=0, highlight=high_x_thresh, midtones=1)
    # # do not sharpen the boundary between the body texture and the black background
    # gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
    # _, thresh = cv2.threshold(gray, 2, 255, cv2.THRESH_BINARY)
    # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
    # gradient = cv2.morphologyEx(thresh, cv2.MORPH_GRADIENT, kernel)
    # roi_gradient = cv2.bitwise_and(high_output_image, high_output_image, mask=gradient)
    # # sharpening filter
    # # sharpened_image = sharpening_filter(high_output_image)
    # sharpened_image = reduce_sharpness(high_output_image, factor=4)
    # # replace the boundary of the sharpened image with the original boundary
    # sharpened_image[gradient != 0] = roi_gradient[gradient != 0]
    # annotate the histogram and save it
    # show_histogram(input_image, img_id, low_x_thresh, high_x_thresh)
    # cv2.imwrite(jpg_path, high_output_image, [cv2.IMWRITE_JPEG_QUALITY, 95])  # save at 95% JPEG quality
    cv2.imwrite(output_filename, high_output_image, [cv2.IMWRITE_JPEG_QUALITY, 95])  # save at 95% JPEG quality
    return output_filename
if __name__ == '__main__':
    arg = argparse.ArgumentParser()
    arg.add_argument('--jpg_dir', type=str, default='/data/datasets_20t/fsdownload/image_color_timing/input')
    arg.add_argument('--save_hist_dir', type=str, default='/data/datasets_20t/fsdownload/image_color_timing/output')
    args = arg.parse_args()
    for img_id in tqdm(os.listdir(args.jpg_dir)):
        jpg_path = os.path.join(args.jpg_dir, img_id)
        remove_gray_and_sharpening(jpg_path, args.save_hist_dir)

apps/ps_image_shadow_up_ag.py (192)

@@ -0,0 +1,192 @@
import os.path
import numpy as np
import cv2
import argparse
def adjust_levels_image(img):
    """Apply a fixed Levels adjustment: black point 20, white point 241, gamma 1.34."""
    img = img.astype(np.float32)
    img = 255 * ((img - 20) / (241 - 20))
    img[img < 0] = 0
    img[img > 255] = 255
    img = 255 * np.power(img / 255.0, 1.0 / 1.34)
    img = (img / 255) * (255 - 0) + 0
    img[img < 0] = 0
    img[img > 255] = 255
    img = img.astype(np.uint8)
    return img
def photoshop_style_feather(image, mask, radius=150):
    """Feather a selection mask with a large Gaussian blur, similar to Photoshop's Feather."""
    if mask.dtype != np.uint8:
        mask = (mask * 255).astype(np.uint8)
    if len(mask.shape) > 2:
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    kernel_size = max(3, int(2 * np.ceil(2 * radius) + 1))
    # pad the mask so the blur does not darken the image border
    expanded_size = (mask.shape[0] + 2 * radius, mask.shape[1] + 2 * radius)
    expanded_mask = np.zeros(expanded_size, dtype=np.uint8)
    center_y, center_x = radius, radius
    expanded_mask[center_y:center_y + mask.shape[0],
                  center_x:center_x + mask.shape[1]] = mask
    blurred_expanded_mask = cv2.GaussianBlur(
        expanded_mask,
        (kernel_size, kernel_size),
        sigmaX=radius,
        sigmaY=radius,
        borderType=cv2.BORDER_REFLECT_101
    )
    feathered_mask = blurred_expanded_mask[center_y:center_y + mask.shape[0],
                                           center_x:center_x + mask.shape[1]]
    feathered_mask = feathered_mask.astype(np.float32) / 255.0
    feathered_mask = np.power(feathered_mask, 1.1)  # slightly increase contrast
    feathered_mask = np.clip(feathered_mask * 255, 0, 255).astype(np.uint8)
    return feathered_mask
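# Illustrative note (not in the original file): with the default radius=150 the blur kernel is
# max(3, int(2 * ceil(300) + 1)) = 601 pixels on a side and sigma equals the radius, so the
# feathered edge ramps between 0 and 255 over a band of roughly two radii around the mask boundary.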
def calculate_luminance(img):
    """Return per-pixel luminance in [0, 1], taken from the Y channel for colour images."""
    if len(img.shape) == 3:
        ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
        return ycrcb[:, :, 0].astype(np.float32) / 255.0
    else:
        return img.astype(np.float32) / 255.0
def photoshop_feather_blend(adjusted_img, original_img, mask, feather_radius=150, brightness_factor=0.95):
    """Blend the adjusted image over the original in linear light, weighted by a feathered mask."""
    if mask.dtype != np.uint8:
        mask = (mask * 255).astype(np.uint8)
    if len(mask.shape) > 2:
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    feathered_mask = photoshop_style_feather(original_img, mask, feather_radius)
    feathered_mask_float = feathered_mask.astype(np.float32) / 255.0
    if len(original_img.shape) == 3 and len(feathered_mask_float.shape) == 2:
        feathered_mask_float = np.stack([feathered_mask_float] * 3, axis=-1)

    def to_linear(img):
        # sRGB transfer function: decode 8-bit values to linear light
        img_linear = img.astype(np.float32) / 255.0
        return np.where(img_linear <= 0.04045,
                        img_linear / 12.92,
                        ((img_linear + 0.055) / 1.055) ** 2.4)

    def to_srgb(img_linear):
        # inverse transfer function: encode linear light back to sRGB
        return np.where(img_linear <= 0.0031308,
                        img_linear * 12.92,
                        1.055 * (img_linear ** (1 / 2.4)) - 0.055)

    adjusted_linear = to_linear(adjusted_img)
    original_linear = to_linear(original_img)
    # pull the adjusted layer down slightly, in proportion to the original brightness
    luminance_adjustment = np.mean(original_linear, axis=-1, keepdims=True) * (1.0 - brightness_factor)
    adjusted_linear_corrected = adjusted_linear - luminance_adjustment
    blended_linear = (adjusted_linear_corrected * feathered_mask_float +
                      original_linear * (1 - feathered_mask_float))
    blended_srgb = to_srgb(blended_linear)
    blended_img = np.clip(blended_srgb * 255, 0, 255).astype(np.uint8)
    return blended_img
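# Illustrative check of the transfer functions above (not in the original file): an 8-bit value of
# 128 is 128 / 255 = 0.502 in sRGB, decodes to ((0.502 + 0.055) / 1.055) ** 2.4 = 0.216 in linear
# light, and encodes back to 0.502, so blending in linear light produces brighter mixtures than a
# naive blend of the 8-bit values would.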
def rgb2lab_image(rgb_img):
    """Convert an RGB image to CIELAB using a D50-adapted sRGB matrix."""
    rgb = rgb_img.astype(np.float32) / 255.0
    mask = rgb > 0.04045
    rgb = np.where(mask,
                   np.power((rgb + 0.055) / 1.055, 2.4),
                   rgb / 12.92)
    XYZ = np.dot(rgb, [
        [0.436052025, 0.222491598, 0.013929122],
        [0.385081593, 0.716886060, 0.097097002],
        [0.143087414, 0.060621486, 0.714185470]
    ])
    # normalise by the D50 reference white
    XYZ *= np.array([100.0, 100.0, 100.0]) / [96.4221, 100.0, 82.5211]
    epsilon = 0.008856
    kappa = 903.3
    XYZ_norm = np.where(XYZ > epsilon,
                        np.power(XYZ, 1 / 3),
                        (kappa * XYZ + 16) / 116)
    L = 116 * XYZ_norm[..., 1] - 16
    a = 500 * (XYZ_norm[..., 0] - XYZ_norm[..., 1])
    b = 200 * (XYZ_norm[..., 1] - XYZ_norm[..., 2])
    return np.stack([L, a, b], axis=-1)
def photoshop_lab_color_range_optimized(bgr_img, target_lab, tolerance=59, anti_alias=True):
    """Build a soft selection mask of pixels close to target_lab, similar to Photoshop's Color Range."""
    rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
    lab_img = rgb2lab_image(rgb_img)
    L, a, b = lab_img[:, :, 0], lab_img[:, :, 1], lab_img[:, :, 2]
    target_L, target_a, target_b = target_lab
    diff_L = np.abs(L - target_L)
    diff_a = np.abs(a - target_a)
    diff_b = np.abs(b - target_b)
    # widen the selection in dark regions
    dark_boost = np.ones_like(L)
    dark_mask = L < 40
    dark_boost[dark_mask] = 1.2
    # chroma differences are weighted more heavily than lightness differences
    weighted_diff = np.sqrt(
        0.25 * (diff_L / 100) ** 2 +
        0.75 * ((diff_a + diff_b) / 255) ** 2
    ) * 100
    weighted_diff = weighted_diff / dark_boost
    threshold = 1.6 * (100 - tolerance) / 100 * 23
    normalized_diff = weighted_diff / threshold
    # smooth falloff: ~1 well inside the tolerance, 0.5 at the threshold, ~0 far outside it
    mask = 0.5 * (np.tanh(4 * (1 - normalized_diff)) + 1)
    if anti_alias:
        mask = cv2.GaussianBlur(mask, (5, 5), 0)
    return mask
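# Illustrative numbers for the mask above (not in the original file): with tolerance=81 the
# threshold is 1.6 * (100 - 81) / 100 * 23 = 6.99, so a pixel whose weighted Lab distance is 0
# gets mask = 0.5 * (tanh(4) + 1) = 0.9997, a pixel exactly at the threshold gets 0.5, and a pixel
# at twice the threshold gets 0.5 * (tanh(-4) + 1) = 0.0003.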
def photoshop_actions_emulation(input_path, output_path):
    """Emulate the recorded Photoshop action: select a target Lab colour range, apply the levels lift, and blend it back through a feathered mask."""
    original_img = cv2.imread(input_path)
    # target colour in Lab and the Color Range tolerance
    target_lab = np.array([47.89, 20.31, 20.6], dtype=np.float32)
    tol = 81
    mask = photoshop_lab_color_range_optimized(original_img, target_lab, tol)
    mask_uint8 = (mask * 255).astype(np.uint8)
    adjusted_img = adjust_levels_image(original_img)
    result = photoshop_feather_blend(adjusted_img, original_img, mask_uint8,
                                     feather_radius=150, brightness_factor=0.90)
    cv2.imwrite(output_path, result)
if __name__ == '__main__':
    arg = argparse.ArgumentParser()
    arg.add_argument('--image_name', type=str, default='274351Tex1_adjusted060518_2_221.jpg')
    arg.add_argument('--image_name_new', type=str, default='274351Tex1_adjusted060518_2_221_999999.jpg')
    arg.add_argument('--in_dir', type=str, default='/data/datasets_20t/fsdownload/image_color_timing/output/')
    arg.add_argument('--out_dir', type=str, default='/data/datasets_20t/fsdownload/image_color_timing/shadow_up/')
    args = arg.parse_args()
    os.makedirs(args.out_dir, exist_ok=True)
    input_path = os.path.join(args.in_dir, args.image_name)
    output_path = os.path.join(args.out_dir, args.image_name_new)
    photoshop_actions_emulation(input_path, output_path)

apps/white_purification_v1.py (139)

@@ -0,0 +1,139 @@
import os.path
import shutil
import time
import argparse
import cv2
import numpy as np
from fix_up_color_two import remove_gray_and_sharpening
from ps_image_shadow_up_ag import photoshop_actions_emulation
def perceptual_adjustment(img, threshold=220, reduction=0.5):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    saturation_weights = 1 - (s.astype(np.float32) / 255 * 0.01)
    adjusted_v = np.where(
        v > threshold,
        threshold + (v - threshold) * (1 - reduction * saturation_weights),
        v
    )
    return cv2.cvtColor(cv2.merge([h, s, adjusted_v.astype(np.uint8)]), cv2.COLOR_HSV2BGR)
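# Illustrative numbers for perceptual_adjustment (not in the original file): with threshold=220 and
# reduction=0.5, a fully desaturated pixel with V=250 keeps 220 + 30 * (1 - 0.5 * 1.0) = 235, while
# a fully saturated pixel keeps 220 + 30 * (1 - 0.5 * 0.99) = 235.15, so values above the threshold
# are roughly halved, with a very slight preference for saturated colours.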
def process_image(input_path, output_path, threshold=210, reduction=0.6):
    """Read an image, compress its highlights with perceptual_adjustment and save the result."""
    try:
        img = cv2.imread(input_path)
        if img is None:
            raise ValueError("failed to read the image, please check the path")
        result = perceptual_adjustment(img, threshold, reduction)
        cv2.imwrite(output_path, result)
        print(f"done, result saved to: {output_path}")
        return True
    except Exception as e:
        print(f"processing failed: {str(e)}")
        return False
def sigmoid(x, center=0.0, slope=10.0):
    return 1 / (1 + np.exp(-slope * (x - center)))
def reduce_highlights_lab_advanced_hsvmask(img, highlight_thresh=220, strength=30, sigma=15):
    """Darken highlights in Lab space, using a soft HSV-value mask and preserving local detail."""
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    V = hsv[:, :, 2].astype(np.float32)
    # soft mask: ~0 well below the threshold, ~1 well above it
    mask = sigmoid(V, center=highlight_thresh, slope=0.05)
    mask = np.clip(mask, 0, 1)
    mask = cv2.GaussianBlur(mask, (0, 0), sigmaX=2)
    mask_vis = (mask * 255).astype(np.uint8)
    img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    L, a, b = cv2.split(img_lab)
    L = L.astype(np.float32)
    # split L into a low-frequency base and high-frequency detail, and darken only the base
    L_blur = cv2.GaussianBlur(L, (0, 0), sigma)
    L_detail = L - L_blur
    L_dark = np.clip(L_blur - strength * mask, 0, 255)
    L_new = np.clip(L_dark + L_detail, 0, 255).astype(np.uint8)
    lab_new = cv2.merge([L_new, a, b])
    result = cv2.cvtColor(lab_new, cv2.COLOR_Lab2BGR)
    return result, mask_vis
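# Illustrative numbers for the soft mask above (not in the original file): with
# highlight_thresh=210 (as used by correct_light_again_hsv below) and slope=0.05, a pixel with
# V=230 gets weight 1 / (1 + exp(-0.05 * 20)) = 0.73 and is darkened by about 0.73 * strength,
# while a pixel with V=190 gets 1 / (1 + exp(1)) = 0.27, so the transition around the threshold
# spans roughly 40 value levels rather than a hard cut.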
def correct_light_again_hsv(image_path):
    img = cv2.imread(image_path)
    result, mask_vis = reduce_highlights_lab_advanced_hsvmask(
        img,
        highlight_thresh=210,
        strength=10,
        sigma=3
    )
    output_image_path = image_path.replace(".jpg", "_light02.jpg")
    cv2.imwrite(
        output_image_path,
        result
    )
    return output_image_path
def correct_texture_image(input_path, image_result_dir, output_path):
    """Run the full purification chain on one texture and copy the final result to output_path."""
    # input_path = os.path.join(image_in_dir, image_name)
    image_name = os.path.basename(input_path)
    params = {
        'threshold': 200,
        'reduction': 0.6
    }
    image_cache_dir = os.path.join(image_result_dir, "cache")
    os.makedirs(image_cache_dir, exist_ok=True)
    out_put_path = os.path.join(image_cache_dir, image_name)
    # shadow lift (ps_image_shadow_up_ag)
    shadow_up_path = out_put_path.replace(".jpg", "_shadow_up.jpg")
    photoshop_actions_emulation(input_path, shadow_up_path)
    # levels-based gray removal (fix_up_color_two)
    image_light_down_fix_up_path = remove_gray_and_sharpening(shadow_up_path, image_cache_dir)
    # highlight compression above the threshold
    output_light_up_path = image_light_down_fix_up_path.replace(".jpg", "_light_down.jpg")
    process_image(image_light_down_fix_up_path, output_light_up_path, **params)
    # second, gentler highlight reduction in Lab
    output_result_image_path = correct_light_again_hsv(output_light_up_path)
    shutil.copy(output_result_image_path, output_path)
    time.sleep(1)
    try:
        shutil.rmtree(image_cache_dir)
        # os.remove(shadow_up_path)
        # os.remove(image_light_down_fix_up_path)
        # os.remove(output_light_up_path)
        # os.remove(output_result_image_path)
    except Exception:
        print("failed to delete the cache files")
if __name__ == "__main__":
arg = argparse.ArgumentParser()
arg.add_argument("-i","--image_path", type=str, default="")
args = arg.parse_args()
image_result_dir=os.path.dirname(args.image_path)
os.makedirs(image_result_dir, exist_ok=True)
start_time= time.time()
correct_texture_image(args.input_path,image_result_dir,args.image_path)
end_time = time.time()
total_time = round(end_time - start_time, 2)
print(f"处理成功,耗时 {total_time} 秒,")
"""
1暗部提亮->白色提纯(220)->高光压暗->二次亮度调整
"""