
v4 white purification

master
dongchangxi committed 6 months ago
commit 6779b08fa8
  1. apps/auto_convert3d_new.py (4 changed lines)
  2. apps/fix_up_color_two_a.py (245 added lines)
  3. apps/ps_image_shadow_up_ag_two_a.py (204 added lines)
  4. apps/white_purification_v4.py (469 added lines)

apps/auto_convert3d_new.py (4 changed lines)

@@ -3,7 +3,7 @@ import os, oss2, time, redis, requests, shutil, sys, subprocess, json, platform
  from PIL import Image, ImageDraw, ImageFont
  from retrying import retry
  import atexit,platform
- import white_purification_v3,white_purification
+ import white_purification_v4,white_purification
  if platform.system() == 'Windows':
      sys.path.append('e:\\libs\\')
  import common
@@ -152,7 +152,7 @@ def team_check(r):
      imagePath = os.path.join(workdir, 'print', pid,pid+"Tex1.jpg")
      print("Starting white purification of the texture")
      #white_purification.white_purification_utils(imagePath)
-     os.system(f'python D:\\make2\\apps\white_purification_v3.py -i {imagePath}')
+     os.system(f'python D:\\make2\\apps\white_purification_v4.py -i {imagePath}')
      print("White purification of the texture file finished", imagePath)
      # After purification, re-upload the purified image
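For reference, a minimal sketch of the same invocation through subprocess rather than os.system; the script path mirrors the diff above, while the helper name is purely illustrative and not part of the commit:

# Sketch only (not part of the commit): invoke white_purification_v4.py with an
# argument list, which avoids shell quoting issues if image_path contains spaces.
import subprocess, sys

def run_white_purification(image_path, script=r'D:\make2\apps\white_purification_v4.py'):
    subprocess.run([sys.executable, script, '-i', image_path], check=True)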

apps/fix_up_color_two_a.py (245 added lines)

@@ -0,0 +1,245 @@
import cv2, numpy as np, matplotlib.pyplot as plt, os, shutil, argparse, random, time, math
from tqdm import tqdm
def ps_color_scale_adjustment(image, shadow=0, highlight=255, midtones=1):
    '''
    Simulate Photoshop's Levels adjustment, with 0 <= shadow < highlight <= 255.
    :param image: input image
    :param shadow: black point (0 to highlight)
    :param highlight: white point (shadow to 255)
    :param midtones: gray point / gamma (0.01 to 9.99)
    :return: adjusted image
    '''
    if highlight > 255:
        highlight = 255
    if shadow < 0:
        shadow = 0
    if shadow >= highlight:
        shadow = highlight - 2
    if midtones > 9.99:
        midtones = 9.99
    if midtones < 0.01:
        midtones = 0.01
    image = np.array(image, dtype=np.float16)
    # Spread between the white point and the black point
    Diff = highlight - shadow
    image = image - shadow
    image[image < 0] = 0
    image = (image / Diff) ** (1 / midtones) * 255
    image[image > 255] = 255
    image = np.array(image, dtype=np.uint8)
    return image
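# Worked example of the mapping above (illustration only): with shadow=12, highlight=240
# and midtones=1, an input value of 126 maps to ((126 - 12) / 228) ** 1 * 255 = 127.5,
# stored as 127 after the uint8 cast.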
# def show_histogram(image, image_id, save_hist_dir, min_threshold, max_threshold):
#     '''
#     Plot the histogram for inspection
#     :param image: input image
#     :param image_id: image id
#     :param save_hist_dir: output directory
#     :param min_threshold: lower threshold
#     :param max_threshold: upper threshold
#     :return: original image, and image_change, the image clipped to the low/high histogram thresholds
#     '''
#     plt.rcParams['font.family'] = 'SimHei'
#     plt.rcParams['axes.unicode_minus'] = False
#     plt.hist(image.ravel(), 254, range=(2, 256), density=False)
#     plt.hist(image.ravel(), 96, range=(2, 50), density=False)  # zoom into range (0, 50); using about twice as many bins as the range keeps the plot sparser and easier to compare
#     plt.hist(image.ravel(), 110, range=(200, 255), density=False)  # zoom into range (225, 255)
#     plt.annotate('thresh1=' + str(min_threshold),  # annotation text
#                  xy=(min_threshold, 0),  # arrow target (the configured threshold)
#                  xytext=(min_threshold, 500000),  # text position (the configured threshold)
#                  arrowprops=dict(facecolor='black', width=1, shrink=5, headwidth=2))  # arrow style
#     plt.annotate('thresh2=' + str(max_threshold),  # annotation text
#                  xy=(max_threshold, 0),  # arrow target (the configured threshold)
#                  xytext=(max_threshold, 500000),  # text position (the configured threshold)
#                  arrowprops=dict(facecolor='black', width=1, shrink=5, headwidth=2))  # arrow style
#     # Draw a horizontal reference line on the y axis
#     # plt.axhline(y=10000, color='r', linestyle='--', linewidth=0.5)
#     plt.title(str(image_id))
#     # plt.show()
#     # Save the histogram
#     save_hist_name = os.path.join(save_hist_dir, f'{image_id}_{min_threshold}&{max_threshold}.jpg')
#     plt.savefig(save_hist_name)
#     # Clear the figure to avoid overlapping plots
#     plt.clf()
# def low_find_histogram_range(image, target_frequency):
#     '''
#     Walk the histogram upward to find the bin (x) whose frequency (y) reaches target_frequency
#     :param image: input image
#     :param target_frequency: frequency limit on the histogram's y axis
#     :return: histogram bin x and its frequency y
#     '''
#     # Compute the grayscale histogram
#     hist, bins = np.histogram(image, bins=256, range=[0, 256])
#     # Initialize the bin and frequency
#     interval = 2
#     frequency = hist[255]
#     while frequency < target_frequency:
#         # Advance the bin and update the frequency
#         interval += 1
#         # If the histogram frequency is None, treat it as 0 to avoid comparing None with an int
#         frequency = hist[interval] if hist[interval] is not None else 0
#         frequency += hist[interval] if hist[interval] is not None else 0
#         # Stop once the frequency is close to 10000
#         if target_frequency - 2000 <= frequency <= target_frequency + 1000:
#             break
#
#     return interval, frequency
# def high_find_histogram_range(image, target_frequency):
#     '''
#     Walk the histogram downward to find the bin (x) whose frequency (y) reaches target_frequency
#     :param image: input image
#     :param target_frequency: frequency limit on the histogram's y axis
#     :return: histogram bin x and its frequency y
#     '''
#     # Compute the grayscale histogram
#     hist, bins = np.histogram(image, bins=256, range=[0, 256])
#     # Initialize the bin and frequency
#     interval = 255
#     frequency = hist[255]
#     while frequency < target_frequency:
#         # Step the bin down and update the frequency
#         interval -= 1
#         # If the histogram frequency is None, treat it as 0 to avoid comparing None with an int
#         frequency = hist[interval] if hist[interval] is not None else 0
#         frequency += hist[interval] if hist[interval] is not None else 0
#         # Stop once the frequency is close to 10000
#         if target_frequency - 2000 <= frequency <= target_frequency + 2000:
#             break
#
#     return interval, frequency
def find_last_x(image, slope_threshold=1000):
    x = []
    y = []
    hist, bins = np.histogram(image, bins=256, range=[0, 256])
    # Find the highest peak below 50 (search bins 5-24)
    max_y = 0
    max_i = 5
    for i in range(5, 25):
        if hist[i] > max_y:
            max_y = hist[i]
            max_i = i
    print(f'Highest peak below 50: y={max_y}, at x={max_i}')
    for i in range(2, max_i):
        x.append(i)
        y.append(hist[i])
    slopes = [abs(y[i + 1] - y[i]) for i in range(len(x) - 1)]
    current_interval = []
    max_interval = []
    max_x = {}
    for i, slope in enumerate(slopes):
        current_interval.append(slope)
        if slope >= slope_threshold:
            if len(current_interval) > len(max_interval):
                max_interval = current_interval.copy()
            max_x[x[i]] = slope
            current_interval = []
    if not max_x:
        # Retry with a lower slope threshold
        return find_last_x(image, slope_threshold=slope_threshold // 2)
    print(max_x)
    last_x = list(max_x)[-1]
    last_y = max_x[last_x]
    return last_x, last_y
def find_last_high(image, slope_threshold=2500):
    x = []
    y = []
    hist, bins = np.histogram(image, bins=255, range=[2, 255])
    # Find the highest peak above 200 (search bins 240-254)
    max_y = 0
    max_i = 254
    for i in range(240, 255):
        if hist[i] > max_y:
            max_y = hist[i]
            max_i = i
    print(f'Highest peak above 200: y={max_y}, at x={max_i}')
    for i in range(max_i, 255):
        x.append(i)
        y.append(hist[i])
    slopes = [abs(y[i + 1] - y[i]) for i in range(len(x) - 1)]
    current_interval = []
    max_interval = []
    max_x = {}
    find = False
    for i in range(len(slopes) - 1, -1, -1):
        slope = slopes[i]
        current_interval.append(slope)
        if slope >= slope_threshold:
            find = True
            if len(current_interval) > len(max_interval):
                max_interval = current_interval.copy()
            max_x[x[i]] = slope
            current_interval = []
    # If nothing steep was found above 200 and the 240 bin is below 5000, clip at position 240
    if not find and hist[240] < 5000:
        max_x[240] = hist[240]
    print(max_x)
    if len(max_x) > 0:
        last_x = list(max_x)[0]
        last_y = max_x[last_x]
        if last_x < 240:
            last_x = 240
            # last_y = max_x[last_x]
    else:
        print(f'No relatively flat region found above 200; using 254 as the highest peak')
        last_x = 254
        last_y = hist[254]
    return last_x, last_y
def remove_gray_and_sharpening(jpg_path, save_hist_dir):
    input_image = cv2.imread(jpg_path)
    filename = os.path.basename(jpg_path)
    # low_x_thresh, low_y_frequency = low_find_histogram_range(input_image, low_y_limit)
    low_x_thresh, low_y_frequency = find_last_x(input_image)
    # high_x_thresh, high_y_frequency = high_find_histogram_range(input_image, high_y_limit)
    high_x_thresh, high_y_frequency = find_last_high(input_image)
    # print(f"{low_x_thresh} bin, {low_y_frequency} frequency")
    # print(f"{high_x_thresh} bin, {high_y_frequency} frequency")
    output_filename = os.path.join(save_hist_dir, filename).replace('.jpg', f'_{low_x_thresh}_{high_x_thresh}.jpg')
    high_output_image = ps_color_scale_adjustment(input_image, shadow=low_x_thresh, highlight=high_x_thresh, midtones=1)
    # high_output_image = ps_color_scale_adjustment(low_ouput_image, shadow=0, highlight=high_x_thresh, midtones=1)
    # # Skip sharpening along the boundary between the body texture and the black background
    # gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
    # _, thresh = cv2.threshold(gray, 2, 255, cv2.THRESH_BINARY)
    # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
    # gradient = cv2.morphologyEx(thresh, cv2.MORPH_GRADIENT, kernel)
    # roi_gradient = cv2.bitwise_and(high_output_image, high_output_image, mask=gradient)
    # # Sharpening filter
    # # sharpened_image = sharpening_filter(high_output_image)
    # sharpened_image = reduce_sharpness(high_output_image, factor=4)
    # # Replace the sharpened boundary with the original boundary
    # sharpened_image[gradient != 0] = roi_gradient[gradient != 0]
    # Annotate and save the histogram
    # show_histogram(input_image, img_id, low_x_thresh, high_x_thresh)
    # cv2.imwrite(jpg_path, high_output_image, [cv2.IMWRITE_JPEG_QUALITY, 95])  # save at 95% JPEG quality
    cv2.imwrite(output_filename, high_output_image, [cv2.IMWRITE_JPEG_QUALITY, 95])  # save at 95% JPEG quality
    return output_filename
if __name__ == '__main__':
    arg = argparse.ArgumentParser()
    arg.add_argument('--jpg_dir', type=str, default='/data/datasets_20t/fsdownload/image_color_timing/input')
    arg.add_argument('--save_hist_dir', type=str, default='/data/datasets_20t/fsdownload/image_color_timing/output')
    args = arg.parse_args()
    for img_id in tqdm(os.listdir(args.jpg_dir)):
        jpg_path = os.path.join(args.jpg_dir, img_id)
        remove_gray_and_sharpening(jpg_path, args.save_hist_dir)
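A minimal usage sketch for this module on a single texture; both paths are hypothetical and the output directory must already exist:

# Sketch: white purification of one texture via the Levels clipping above.
# find_last_x picks the black point, find_last_high the white point, and
# ps_color_scale_adjustment stretches the range between them.
from fix_up_color_two_a import remove_gray_and_sharpening

out_path = remove_gray_and_sharpening('/data/example/272000Tex1.jpg', '/data/example/output')
print(out_path)  # e.g. /data/example/output/272000Tex1_<low>_<high>.jpg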

apps/ps_image_shadow_up_ag_two_a.py (204 added lines)

@@ -0,0 +1,204 @@
import os.path
import numpy as np
import cv2
import argparse
import matplotlib.pyplot as plt
def adjust_levels_image(img):
    """Fixed Photoshop-style Levels adjustment: input range 20-241, midtones 1.34, output range 0-255."""
    img = img.astype(np.float32)
    img = 255 * ((img - 20) / (241 - 20))
    img[img < 0] = 0
    img[img > 255] = 255
    img = 255 * np.power(img / 255.0, 1.0 / 1.34)
    img = (img / 255) * (255 - 0) + 0
    img[img < 0] = 0
    img[img > 255] = 255
    img = img.astype(np.uint8)
    return img
def photoshop_style_feather(image, mask, radius=150):
    """
    Feather a selection mask roughly the way Photoshop does: pad the mask, Gaussian-blur it with sigma = radius, then crop back.
    """
    if mask.dtype != np.uint8:
        mask = (mask * 255).astype(np.uint8)
    if len(mask.shape) > 2:
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    kernel_size = max(3, int(2 * np.ceil(2 * radius) + 1))
    expanded_size = (mask.shape[0] + 2 * radius, mask.shape[1] + 2 * radius)
    expanded_mask = np.zeros(expanded_size, dtype=np.uint8)
    center_y, center_x = radius, radius
    expanded_mask[center_y:center_y + mask.shape[0],
                  center_x:center_x + mask.shape[1]] = mask
    blurred_expanded_mask = cv2.GaussianBlur(
        expanded_mask,
        (kernel_size, kernel_size),
        sigmaX=radius,
        sigmaY=radius,
        borderType=cv2.BORDER_REFLECT_101
    )
    feathered_mask = blurred_expanded_mask[center_y:center_y + mask.shape[0],
                                           center_x:center_x + mask.shape[1]]
    feathered_mask = feathered_mask.astype(np.float32) / 255.0
    feathered_mask = np.power(feathered_mask, 1.1)  # slightly boost contrast
    feathered_mask = np.clip(feathered_mask * 255, 0, 255).astype(np.uint8)
    return feathered_mask
def calculate_luminance(img):
    """Return per-pixel luminance in [0, 1] (Y channel of YCrCb for color images)."""
    if len(img.shape) == 3:
        ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
        return ycrcb[:, :, 0].astype(np.float32) / 255.0
    else:
        return img.astype(np.float32) / 255.0
def photoshop_feather_blend(adjusted_img, original_img, mask, feather_radius=150, brightness_factor=0.95):
    """
    Blend the adjusted image into the original through a feathered mask, working in linear light.
    """
    if mask.dtype != np.uint8:
        mask = (mask * 255).astype(np.uint8)
    if len(mask.shape) > 2:
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    # plt.figure(figsize=(10, 8))
    # plt.imshow(mask, cmap='gray')
    # plt.title("Feathered Mask")
    # plt.axis('off')
    # plt.colorbar(label='Opacity')
    # plt.show()
    feathered_mask = photoshop_style_feather(original_img, mask, feather_radius)
    # plt.figure(figsize=(10, 8))
    # plt.imshow(feathered_mask, cmap='gray')
    # plt.title("Feathered Mask")
    # plt.axis('off')
    # plt.colorbar(label='Opacity')
    # plt.show()
    feathered_mask_float = feathered_mask.astype(np.float32) / 255.0
    if len(original_img.shape) == 3 and len(feathered_mask_float.shape) == 2:
        feathered_mask_float = np.stack([feathered_mask_float] * 3, axis=-1)
    def to_linear(img):
        img_linear = img.astype(np.float32) / 255.0
        return np.where(img_linear <= 0.04045,
                        img_linear / 12.92,
                        ((img_linear + 0.055) / 1.055) ** 2.4)
    def to_srgb(img_linear):
        return np.where(img_linear <= 0.0031308,
                        img_linear * 12.92,
                        1.055 * (img_linear ** (1 / 2.4)) - 0.055)
    adjusted_linear = to_linear(adjusted_img)
    original_linear = to_linear(original_img)
    luminance_adjustment = np.mean(original_linear, axis=-1, keepdims=True) * (1.0 - brightness_factor)
    adjusted_linear_corrected = adjusted_linear - luminance_adjustment
    blended_linear = (adjusted_linear_corrected * feathered_mask_float +
                      original_linear * (1 - feathered_mask_float))
    blended_srgb = to_srgb(blended_linear)
    blended_img = np.clip(blended_srgb * 255, 0, 255).astype(np.uint8)
    return blended_img
def rgb2lab_image(rgb_img):
    """Convert an sRGB image (uint8) to Lab using a D50 white point."""
    rgb = rgb_img.astype(np.float32) / 255.0
    mask = rgb > 0.04045
    rgb = np.where(mask,
                   np.power((rgb + 0.055) / 1.055, 2.4),
                   rgb / 12.92)
    XYZ = np.dot(rgb, [
        [0.436052025, 0.222491598, 0.013929122],
        [0.385081593, 0.716886060, 0.097097002],
        [0.143087414, 0.060621486, 0.714185470]
    ])
    XYZ *= np.array([100.0, 100.0, 100.0]) / [96.4221, 100.0, 82.5211]
    epsilon = 0.008856
    kappa = 903.3
    XYZ_norm = np.where(XYZ > epsilon,
                        np.power(XYZ, 1 / 3),
                        (kappa * XYZ + 16) / 116)
    L = 116 * XYZ_norm[..., 1] - 16
    a = 500 * (XYZ_norm[..., 0] - XYZ_norm[..., 1])
    b = 200 * (XYZ_norm[..., 1] - XYZ_norm[..., 2])
    return np.stack([L, a, b], axis=-1)
def photoshop_lab_color_range_optimized(bgr_img, target_lab, tolerance=59, anti_alias=True):
    """Build a soft selection mask (0-1) for pixels near target_lab, similar to Photoshop's Color Range tool."""
    rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
    lab_img = rgb2lab_image(rgb_img)
    L, a, b = lab_img[:, :, 0], lab_img[:, :, 1], lab_img[:, :, 2]
    target_L, target_a, target_b = target_lab
    diff_L = np.abs(L - target_L)
    diff_a = np.abs(a - target_a)
    diff_b = np.abs(b - target_b)
    dark_boost = np.ones_like(L)
    dark_mask = L < 40
    dark_boost[dark_mask] = 1.2
    weighted_diff = np.sqrt(
        0.25 * (diff_L / 100) ** 2 +
        0.75 * ((diff_a + diff_b) / 255) ** 2
    ) * 100
    weighted_diff = weighted_diff / dark_boost
    threshold = 1.6 * (100 - tolerance) / 100 * 23
    normalized_diff = weighted_diff / threshold
    mask = 0.5 * (np.tanh(4 * (1 - normalized_diff)) + 1)
    if anti_alias:
        mask = cv2.GaussianBlur(mask, (5, 5), 0)
    return mask
def photoshop_actions_emulation(input_path, output_path):
    """Select pixels near a fixed target Lab color, brighten them with the fixed Levels adjustment, and feather-blend the result into the original."""
    original_img = cv2.imread(input_path)
    target_lab = np.array([47.89, 20.31, 20.6], dtype=np.float32)
    tol = 81
    mask = photoshop_lab_color_range_optimized(original_img, target_lab, tol)
    mask_uint8 = (mask * 255).astype(np.uint8)
    adjusted_img = adjust_levels_image(original_img)
    result = photoshop_feather_blend(adjusted_img, original_img, mask_uint8,
                                     feather_radius=15, brightness_factor=0.95)
    cv2.imwrite(output_path, result)
if __name__ == '__main__':
    arg = argparse.ArgumentParser()
    arg.add_argument('--image_name', type=str, default='274351Tex1_adjusted060518_2_221.jpg')
    arg.add_argument('--image_name_new', type=str, default='274351Tex1_adjusted060518_2_221_999999.jpg')
    arg.add_argument('--in_dir', type=str, default='/data/datasets_20t/fsdownload/image_color_timing/output/')
    arg.add_argument('--out_dir', type=str, default='/data/datasets_20t/fsdownload/image_color_timing/shadow_up/')
    args = arg.parse_args()
    os.makedirs(args.out_dir, exist_ok=True)
    input_path = os.path.join(args.in_dir, args.image_name)
    output_path = os.path.join(args.out_dir, args.image_name_new)
    photoshop_actions_emulation(input_path, output_path)
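A minimal usage sketch, again with hypothetical paths; in the v4 pipeline this step runs on the output of fix_up_color_two_a.py:

# Sketch: lift shadows on one texture. photoshop_actions_emulation selects pixels
# near the fixed target Lab color, brightens them with adjust_levels_image,
# and feather-blends the result into the original.
from ps_image_shadow_up_ag_two_a import photoshop_actions_emulation

photoshop_actions_emulation('/data/example/272000Tex1_12_250.jpg',
                            '/data/example/272000Tex1_12_250_shadow_up.jpg')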

apps/white_purification_v4.py (469 added lines)

@@ -0,0 +1,469 @@
import os.path
import shutil
import time
import argparse
import cv2
import numpy as np
from scipy.interpolate import CubicSpline
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from fix_up_color_two_a import remove_gray_and_sharpening
from ps_image_shadow_up_ag_two_a import photoshop_actions_emulation
# def perceptual_adjustment(img, threshold=220, reduction=0.5):
#     hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#     h, s, v = cv2.split(hsv)
#
#     saturation_weights = 1 - (s.astype(np.float32) / 255 * 0.01)
#
#     adjusted_v = np.where(
#         v > threshold,
#         threshold + (v - threshold) * (1 - reduction * saturation_weights),
#         v
#     )
#
#     return cv2.cvtColor(cv2.merge([h, s, adjusted_v.astype(np.uint8)]), cv2.COLOR_HSV2BGR)
# def perceptual_smooth_adjustment(img, threshold=220, reduction=0.5, margin=5, saturation_sensitivity=0.3):
#     """
#     Perceptual brightness suppression with a smoothed transition zone (prevents hard edges)
#
#     Parameters:
#     - threshold: brightness at which highlight suppression starts
#     - margin: width of the transition band (in brightness values)
#     - reduction: suppression ratio (1 means up to 100% reduction)
#     - saturation_sensitivity: weight of the saturation influence
#     """
#     hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#     h, s, v = cv2.split(hsv)
#
#     v = v.astype(np.float32)
#     s = s.astype(np.float32)
#
#     # 1. Saturation-aware weight (the higher the saturation, the weaker the suppression)
#     sat_weight = 1.0 - (s / 255.0 * saturation_sensitivity)
#     sat_weight = np.clip(sat_weight, 0.0, 1.0)
#
#     # 2. Build a smooth transition weight based on the V value
#     transition_mask = np.zeros_like(v, dtype=np.float32)
#     transition_mask[v <= threshold] = 0.0
#     transition_mask[v >= threshold + margin] = 1.0
#
#     # Linear transition region
#     in_between = (v > threshold) & (v < threshold + margin)
#     transition_mask[in_between] = (v[in_between] - threshold) / margin
#
#     # 3. Final suppression weight (transition combined with saturation awareness)
#     weight = reduction * transition_mask * sat_weight
#
#     # 4. Apply the suppression
#     v_adjusted = v - (v - threshold) * weight
#     v_adjusted = np.clip(v_adjusted, 0, 255).astype(np.uint8)
#
#     # 5. Recompose and return
#     adjusted_hsv = cv2.merge([h, s.astype(np.uint8), v_adjusted])
#     result_bgr = cv2.cvtColor(adjusted_hsv, cv2.COLOR_HSV2BGR)
#
#     return result_bgr
def smootherstep(x):
    """Quintic smoothstep interpolation: an even smoother transition."""
    return x**3 * (x * (x * 6 - 15) + 10)
def perceptual_smooth_adjustment_color_blend(img, threshold=220, reduction=0.5, margin=10, saturation_sensitivity=0.3, blur_radius=5, color_blend_strength=0.5):
    """
    Perceptual highlight suppression with smoother color blending
    - threshold: brightness (V channel) at which suppression starts
    - reduction: suppression strength (0-1)
    - margin: transition band above the threshold (in brightness values)
    - saturation_sensitivity: the higher the saturation, the weaker the suppression
    - blur_radius: blur radius used for the color blend
    - color_blend_strength: amount of color blending (0-1)
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    v = v.astype(np.float32)
    s = s.astype(np.float32)
    # Saturation-aware damping of the suppression
    sat_weight = 1.0 - (s / 255.0 * saturation_sensitivity)
    sat_weight = np.clip(sat_weight, 0.0, 1.0)
    # Smooth suppression weight
    delta = v - threshold
    transition = np.zeros_like(v, dtype=np.float32)
    in_range = (delta > 0) & (delta < margin)
    transition[in_range] = smootherstep(delta[in_range] / margin)
    transition[delta >= margin] = 1.0
    # Combine the weights
    weight = reduction * transition * sat_weight
    # Apply the suppression
    v_new = v - (v - threshold) * weight
    v_new = np.clip(v_new, 0, 255).astype(np.uint8)
    # Recompose the suppressed image
    adjusted_hsv = cv2.merge([h, s.astype(np.uint8), v_new])
    adjusted = cv2.cvtColor(adjusted_hsv, cv2.COLOR_HSV2BGR)
    # -------------------
    # Blend in a blurred copy of the original to soften abrupt color shifts
    # -------------------
    blurred = cv2.GaussianBlur(img, (blur_radius | 1, blur_radius | 1), 0)
    # Blend-weight mask, active only in the transition region
    color_blend_mask = np.clip(weight, 0, 1) * color_blend_strength
    color_blend_mask = color_blend_mask[..., None]  # expand to (H, W, 1) for per-channel blending
    # Blend colors so suppressed pixels move closer to their surroundings
    final = adjusted.astype(np.float32) * (1 - color_blend_mask) + blurred.astype(np.float32) * color_blend_mask
    final = np.clip(final, 0, 255).astype(np.uint8)
    return final
# def perceptual_smooth_adjustment(img, threshold=220, reduction=0.5, margin=10, saturation_sensitivity=0.3, blur_radius=3):
#     """
#     Perceptual brightness suppression + smooth transition + edge softening
#
#     Parameters:
#     - threshold: brightness at which suppression starts
#     - reduction: maximum suppression ratio (1 means suppress fully)
#     - margin: transition width (in brightness values)
#     - saturation_sensitivity: the higher the saturation, the weaker the suppression
#     - blur_radius: amount of edge blur in the final result (3 recommended)
#     """
#     # Convert to HSV to get brightness and saturation
#     hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#     h, s, v = cv2.split(hsv)
#
#     v = v.astype(np.float32)
#     s = s.astype(np.float32)
#
#     # 1. Saturation influence (higher saturation means less suppression)
#     sat_weight = 1.0 - (s / 255.0 * saturation_sensitivity)
#     sat_weight = np.clip(sat_weight, 0.0, 1.0)
#
#     # 2. Build a smoother transition weight (using smootherstep)
#     delta = v - threshold
#     transition = np.zeros_like(v, dtype=np.float32)
#
#     in_range = (delta > 0) & (delta < margin)
#     transition[in_range] = smootherstep(delta[in_range] / margin)
#     transition[delta >= margin] = 1.0  # fully suppressed region
#
#     # 3. Brightness suppression weight
#     weight = reduction * transition * sat_weight
#
#     # 4. Apply the suppression
#     v_new = v - (v - threshold) * weight
#     v_new = np.clip(v_new, 0, 255).astype(np.uint8)
#
#     # 5. Merge back into HSV and convert to BGR
#     adjusted_hsv = cv2.merge([h, s.astype(np.uint8), v_new])
#     result = cv2.cvtColor(adjusted_hsv, cv2.COLOR_HSV2BGR)
#
#     # 6. Additional edge blur (only in the transition region)
#     blur_mask = (transition > 0.0) & (transition < 1.0)
#     blur_mask = blur_mask.astype(np.uint8) * 255
#     blur_mask = cv2.GaussianBlur(blur_mask, (blur_radius|1, blur_radius|1), 0)
#
#     # Create a blurred version
#     blurred = cv2.GaussianBlur(result, (blur_radius|1, blur_radius|1), 0)
#
#     # Mix: use the blurred image in the softened-edge region, keep the original elsewhere
#     blur_mask = blur_mask.astype(np.float32) / 255.0
#     result = result.astype(np.float32)
#     blurred = blurred.astype(np.float32)
#
#     final = result * (1 - blur_mask[..., None]) + blurred * blur_mask[..., None]
#     final = np.clip(final, 0, 255).astype(np.uint8)
#
#     return final
def process_image(input_path, output_path, threshold=210, reduction=0.6):
    """
    Read an image, suppress its highlights with perceptual_smooth_adjustment_color_blend, and save the result.
    """
    try:
        img = cv2.imread(input_path)
        if img is None:
            raise ValueError("Failed to read the image; check that the path is correct")
        # result = perceptual_adjustment(img, threshold, reduction)
        result = perceptual_smooth_adjustment_color_blend(img, threshold, reduction)
        cv2.imwrite(output_path, result)
        print(f"Processing succeeded; result saved to: {output_path}")
        return True
    except Exception as e:
        print(f"Processing failed: {str(e)}")
        return False
def sigmoid(x, center=0.0, slope=10.0):
    return 1 / (1 + np.exp(-slope * (x - center)))
def reduce_highlights_lab_advanced_hsvmask(
        img,
        highlight_thresh=220,
        strength=30,
        sigma=15,
        detail_boost=1.0,
        preserve_local_contrast=True
):
    """
    Highlight suppression in LAB + perceptual HSV mask + detail preservation
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    V = hsv[:, :, 2].astype(np.float32)
    # 1. Build a highlight mask with a smooth transition
    mask = sigmoid(V, center=highlight_thresh, slope=0.05)
    mask = np.clip(mask, 0, 1)
    mask = cv2.GaussianBlur(mask, (0, 0), sigmaX=2)
    mask_vis = (mask * 255).astype(np.uint8)
    # 2. Lightness suppression in LAB space
    img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    L, a, b = cv2.split(img_lab)
    L = L.astype(np.float32)
    # 3. Blur and detail layers
    L_blur = cv2.GaussianBlur(L, (0, 0), sigma)
    L_detail = L - L_blur
    # 4. Alternative approach: suppress L, but merge back more gently
    L_target = L_blur - strength * mask
    L_target = np.clip(L_target, 0, 255)
    if preserve_local_contrast:
        # Keep detail and local contrast (avoid over-smoothing)
        L_new = L_target + detail_boost * L_detail
    else:
        # Plain lightness suppression
        L_new = L_target
    L_new = np.clip(L_new, 0, 255).astype(np.uint8)
    # 5. Recompose
    lab_new = cv2.merge([L_new, a, b])
    result = cv2.cvtColor(lab_new, cv2.COLOR_Lab2BGR)
    return result, mask_vis
def suppress_highlights_keep_texture(image_bgr, v_thresh=225, target_v=215, sigma=1):
    """Compress bright regions toward target_v while keeping high-frequency texture."""
    image_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(image_hsv)
    v = v.astype(np.float32)
    v_blur = cv2.GaussianBlur(v, (0, 0), sigmaX=sigma)
    detail = v - v_blur
    # Soft mask (0-1) for dynamic suppression
    mask = (v_blur > v_thresh).astype(np.float32)
    # The larger the weight, the stronger the suppression
    weight = np.clip((v_blur - v_thresh) / 20.0, 0, 1) * mask  # 20 is the suppression bandwidth
    # weight = weight * 1.2
    # Linear blend that pushes brightness toward target_v:
    v_compress = v_blur * (1 - weight) + target_v * weight
    v_new = v_compress + detail
    v_new = np.clip(v_new, 0, 255).astype(np.uint8)
    hsv_new = cv2.merge([h, s, v_new])
    result_bgr = cv2.cvtColor(hsv_new, cv2.COLOR_HSV2BGR)
    return result_bgr
def correct_light_again_hsv(image_path):
    img = cv2.imread(image_path)
    result, mask_vis = reduce_highlights_lab_advanced_hsvmask(
        img,
        highlight_thresh=225,
        strength=15,
        sigma=10,
        detail_boost=1.2
    )
    result_bgr = suppress_highlights_keep_texture(result)
    output_image_path = image_path.replace(".jpg", "_light02.jpg")
    cv2.imwrite(
        output_image_path,
        result_bgr
    )
    return output_image_path
def generate_curve_lut(x_points, y_points):
    """
    Build a 256-entry lookup table (LUT) from curve control points.
    """
    cs = CubicSpline(x_points, y_points, bc_type='natural')
    x = np.arange(256)
    y = cs(x)
    y = np.clip(y, 0, 255).astype(np.uint8)
    return y
def apply_curve(img, lut):
    """
    Apply the curve LUT to every channel of the image (composite channel).
    """
    result = cv2.LUT(img, lut)
    return result
def apply_curve_up_image(image_path, image_cache_dir):
    """Brighten"""
    x_points = [0, 124, 255]
    y_points = [0, 131, 255]
    lut = generate_curve_lut(x_points, y_points)
    # adjusted = apply_curve(img, lut)
    image_name_result = image_path.split("/")[-1].replace(".jpg", "_up.jpg")
    result_path = os.path.join(image_cache_dir, image_name_result)
    count = 0
    while True:
        # image_path = "/data/datasets_20t/Downloads_google/correct_show_obj/265340/265340Tex1.jpg"
        # result_path = "/data/datasets_20t/Downloads_google/correct_show_obj/265340/265340Tex1_new.jpg"
        if os.path.exists(result_path):
            image_bgr = cv2.imread(result_path)
        else:
            image_bgr = cv2.imread(image_path)
        image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
        image_hsv = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2HSV).astype(np.float32)
        h, s, v = cv2.split(image_hsv)
        h_mean = np.mean(h)
        s_mean = np.mean(s)
        v_mean = np.mean(v)
        print(f"v_mean {v_mean}")
        if v_mean < 60:
            adjusted = apply_curve(image_bgr, lut)
            cv2.imwrite(result_path, adjusted)
            time.sleep(1)
            count = count + 1
        else:
            break
        if count >= 1:
            cv2.imwrite(result_path, adjusted)
            time.sleep(1)
            break
    if os.path.exists(result_path):
        return result_path
    else:
        return None
def apply_curve_down_image(image_path, image_cache_dir):
    """Darken"""
    x_points = [0, 131, 255]
    y_points = [0, 124, 255]
    lut = generate_curve_lut(x_points, y_points)
    # adjusted = apply_curve(img, lut)
    image_name_result = image_path.split("/")[-1].replace(".jpg", "_down.jpg")
    result_path = os.path.join(image_cache_dir, image_name_result)
    count = 0
    while True:
        # image_path = "/data/datasets_20t/Downloads_google/correct_show_obj/265340/265340Tex1.jpg"
        # result_path = "/data/datasets_20t/Downloads_google/correct_show_obj/265340/265340Tex1_new.jpg"
        if os.path.exists(result_path):
            image_bgr = cv2.imread(result_path)
        else:
            image_bgr = cv2.imread(image_path)
        image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
        image_hsv = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2HSV).astype(np.float32)
        h, s, v = cv2.split(image_hsv)
        h_mean = np.mean(h)
        s_mean = np.mean(s)
        v_mean = np.mean(v)
        print(f"v_mean {v_mean}")
        if v_mean > 110:
            adjusted = apply_curve(image_bgr, lut)
            cv2.imwrite(result_path, adjusted)
            time.sleep(1)
            count = count + 1
        else:
            break
        if count >= 1:
            cv2.imwrite(result_path, adjusted)
            time.sleep(1)
            break
    if os.path.exists(result_path):
        return result_path
    else:
        return None
def correct_texture_image(input_path, image_result_dir, output_path):
    """Run the full texture correction pipeline and copy the final result to output_path."""
    # input_path = os.path.join(image_in_dir, image_name)
    params = {
        'threshold': 220,
        'reduction': 0.6
    }
    image_cache_dir = os.path.join(image_result_dir, "cache")
    os.makedirs(image_cache_dir, exist_ok=True)
    input_path_cure_up = apply_curve_up_image(input_path, image_cache_dir)
    if input_path_cure_up:
        input_path_cure_down = input_path_cure_up
    else:
        input_path_cure_down = input_path
    input_path_cure_down_result = apply_curve_down_image(input_path_cure_down, image_cache_dir)
    if input_path_cure_down_result:
        input_path_correct = input_path_cure_down_result
    else:
        input_path_correct = input_path_cure_down
    print("input_path_correct", input_path_correct)
    # image_name = input_path_correct.split("/")[-1]
    # out_put_path = os.path.join(image_cache_dir, image_name)
    image_light_down_fix_up_path = remove_gray_and_sharpening(input_path_correct, image_cache_dir)
    shadow_up_path = image_light_down_fix_up_path.replace(".jpg", "_shadow_up.jpg")
    photoshop_actions_emulation(image_light_down_fix_up_path, shadow_up_path)
    output_light_up_path = shadow_up_path.replace(".jpg", "_light_down.jpg")
    process_image(shadow_up_path, output_light_up_path, **params)
    # output_result_image_path = correct_light_again_hsv(output_light_up_path)
    shutil.copy(output_light_up_path, output_path)
    time.sleep(1)
    try:
        shutil.rmtree(image_cache_dir)
    except:
        print("Error deleting cache files")
    return output_light_up_path
if __name__ == "__main__":
    arg = argparse.ArgumentParser()
    arg.add_argument("-i", "--image_path", type=str, default="")
    args = arg.parse_args()
    image_result_dir = os.path.dirname(args.image_path)
    os.makedirs(image_result_dir, exist_ok=True)
    start_time = time.time()
    correct_texture_image(args.image_path, image_result_dir, args.image_path)
    end_time = time.time()
    total_time = round(end_time - start_time, 2)
    print(f"Processing succeeded, took {total_time} seconds")
"""
1. Brighten dark areas -> white purification (220) -> darken highlights -> second brightness adjustment
"""