import os.path
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from scipy.ndimage import gaussian_filter
from skimage import io, color
from skimage.color import rgb2lab, lab2rgb
from skimage.exposure import match_histograms


def rgb_to_lab_photoshop(rgb):
    """Convert an RGB image (0-255) to Lab using the formulas Photoshop follows."""
    # Linearize sRGB (piecewise sRGB decoding)
    r, g, b = rgb[..., 0] / 255.0, rgb[..., 1] / 255.0, rgb[..., 2] / 255.0
    r = np.where(r <= 0.04045, r / 12.92, ((r + 0.055) / 1.055) ** 2.4)
    g = np.where(g <= 0.04045, g / 12.92, ((g + 0.055) / 1.055) ** 2.4)
    b = np.where(b <= 0.04045, b / 12.92, ((b + 0.055) / 1.055) ** 2.4)

    # Linear RGB -> XYZ (sRGB / D65 matrix)
    x = r * 0.4124564 + g * 0.3575761 + b * 0.1804375
    y = r * 0.2126729 + g * 0.7151522 + b * 0.0721750
    z = r * 0.0193339 + g * 0.1191920 + b * 0.9503041

    # Normalize by the D65 white point
    x = x / 0.95047
    y = y / 1.00000
    z = z / 1.08883

    # XYZ -> Lab
    epsilon = 0.008856
    kappa = 903.3
    x = np.where(x > epsilon, x ** (1 / 3), (kappa * x + 16) / 116)
    y = np.where(y > epsilon, y ** (1 / 3), (kappa * y + 16) / 116)
    z = np.where(z > epsilon, z ** (1 / 3), (kappa * z + 16) / 116)
    l = 116 * y - 16
    a = 500 * (x - y)
    b = 200 * (y - z)
    return np.stack([l, a, b], axis=-1)


def create_mask_from_lab(lab_image, target_L, target_a, target_b,
                         tolerance_L=5, tolerance_a=5, tolerance_b=5):
    """
    Build a binary mask from target L, a, b values and per-channel tolerances.

    Parameters:
        lab_image: input Lab image
        target_L: target L value
        target_a: target a value
        target_b: target b value
        tolerance_L: allowed deviation in L
        tolerance_a: allowed deviation in a
        tolerance_b: allowed deviation in b

    Returns:
        mask: binary mask (1 = within tolerance, 0 = outside)
    """
    mask = (
        (np.abs(lab_image[..., 0] - target_L) <= tolerance_L) &
        (np.abs(lab_image[..., 1] - target_a) <= tolerance_a) &
        (np.abs(lab_image[..., 2] - target_b) <= tolerance_b)
    ).astype(np.uint8)
    return mask


def filter_contours_by_enclosing_circle(mask, min_diameter=50):
    """Keep only the connected regions whose minimum enclosing circle has a
    diameter of at least ``min_diameter`` pixels."""
    cleaned_mask = np.zeros_like(mask)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        # Minimum enclosing circle of the contour
        (x, y), radius = cv2.minEnclosingCircle(contour)
        diameter = 2 * radius
        # Keep the region only if its enclosing circle reaches the threshold
        if diameter >= min_diameter:
            cv2.drawContours(cleaned_mask, [contour], -1, 255, thickness=cv2.FILLED)
    return cleaned_mask


# def adjust_levels_image(img):
#     """Levels adjustment: input 20-241, gamma 1.34, output 0-255."""
#     img = img.astype(np.float32)
#     img = 255 * ((img - 20) / (241 - 20))
#     img[img < 0] = 0
#     img[img > 255] = 255
#     img = 255 * np.power(img / 255.0, 1.0 / 1.34)
#     img = (img / 255) * (255 - 0) + 0
#     img[img < 0] = 0
#     img[img > 255] = 255
#     img = img.astype(np.uint8)
#     return img


def normalize_coeffs(coeffs):
    # Subtract the median of the three coefficients so only relative shifts remain.
    median_val = sorted(coeffs)[1]
    return np.array([coef - median_val for coef in coeffs])


def img_bgr2rgb(img):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img.astype(np.float32) / 255.0
    return img


def highlight_handle(img, highlight_coeffs):
    highlights_alpha = 0.003923 * normalize_coeffs(highlight_coeffs)
    a = np.diag(np.maximum(highlights_alpha, 0)) * np.eye(3)
    b = np.diag(-np.minimum(highlights_alpha, 0)) * (1 - np.eye(3))
    highlights_alpha = np.sum(a + b, axis=0)
    img = img / (1 - highlights_alpha.reshape(1, 1, 3))
    return img


def shadow_handle(img, shadows_coef):
    shadows_alpha = 0.003923 * normalize_coeffs(shadows_coef)
    a = np.diag(-np.minimum(shadows_alpha, 0)) * np.eye(3)
    b = np.diag(np.maximum(shadows_alpha, 0)) * (1 - np.eye(3))
    shadows_alpha = np.sum(a + b, axis=0)
    img = (img - shadows_alpha.reshape(1, 1, 3)) / (1 - shadows_alpha.reshape(1, 1, 3))
    return img


def mid_tone_handle(img, mid_tone_coeffs):
    mid_tone_alpha = -0.0033944 * normalize_coeffs(mid_tone_coeffs)
    f = np.diag(mid_tone_alpha) * (2 * np.eye(3) - 1)
    mid_tone_gamma = np.exp(np.sum(f, axis=0))
    img = np.power(img, mid_tone_gamma.reshape(1, 1, 3))
    return img
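# Worked example (illustration only, not used by the pipeline): for the
# mid-tone coefficients [-20, 0, 5] passed to adjust_color_balance() below,
# the median is 0, so normalize_coeffs() returns [-20, 0, 5] unchanged.
# The per-channel alpha is -0.0033944 * [-20, 0, 5] = [0.0679, 0.0, -0.0170],
# and since np.diag(alpha) * (2 * np.eye(3) - 1) only keeps the diagonal,
# the resulting per-channel gammas are exp(alpha) ≈ [1.070, 1.000, 0.983]:
# red mid-tones are pulled down slightly and blue mid-tones are lifted,
# i.e. a mild cool/blue shift that makes near-whites look whiter.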
def img_rgb2bgr(img):
    img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX)
    img = img.astype(np.uint8)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    return img


def adjust_color_balance(image, shadow_coefficients, mid_tone_coefficients, highlight_coefficients):
    """Apply a Photoshop-style color balance in three tonal ranges
    (shadows, mid-tones, highlights); coefficients are given per RGB channel."""
    image = img_bgr2rgb(image)
    image = highlight_handle(image, highlight_coefficients)
    image = shadow_handle(image, shadow_coefficients)
    image = mid_tone_handle(image, mid_tone_coefficients)
    image = np.clip(image, 0, 1)
    image = img_rgb2bgr(image)
    # Debug dump to a hard-coded path; disabled.
    # cv2.imwrite('/data/datasets_20t/fsdownload/image_color_timing/white_add_white/9999999999999.jpg', image)
    return image


def apply_gamut_mapping(values):
    """Gamut mapping that keeps values from overflowing."""
    # Compress the highlights to avoid blow-out
    values = np.where(values > 0.9, 0.9 + (values - 0.9) * 0.5, values)
    # Lift the shadows to avoid crushed blacks
    values = np.where(values < 0.1, 0.1 * (values / 0.1) ** 0.7, values)
    return np.clip(values, 0, 1)


# def match_colors(source, target, mask):
#     """Histogram-match ``source`` to ``target`` inside ``mask``."""
#     matched = match_histograms(source, target, channel_axis=-1)
#     mask_3ch = cv2.merge([mask] * 3) if len(mask.shape) == 2 else mask
#     return source * (1 - mask_3ch) + matched * mask_3ch


# def match_colors(source, target, mask, strength):
#     """
#     :param source: source image (H, W, 3)
#     :param target: target image (H, W, 3)
#     :param mask: mask (H, W) or (H, W, 3), values in 0-1
#     :param strength: color-matching strength (0-1)
#     """
#     matched = match_histograms(source, target, channel_axis=-1)
#     mask_3ch = cv2.merge([mask] * 3) if len(mask.shape) == 2 else mask
#     mask_weighted = mask_3ch * strength  # controls the strength
#     return source * (1 - mask_weighted) + matched * mask_weighted


def match_colors(source, target, mask, strength, brightness_scale=0.9):
    matched = match_histograms(source, target, channel_axis=-1)
    matched = matched * brightness_scale  # dim the matched result slightly
    mask_3ch = cv2.merge([mask] * 3) if len(mask.shape) == 2 else mask
    mask_weighted = mask_3ch * strength
    return source * (1 - mask_weighted) + matched * mask_weighted


def apply_feathering(mask, radius):
    """Feather a 0-255 mask with a Gaussian blur whose sigma is derived from ``radius``."""
    mask_float = mask.astype(np.float32) / 255.0
    blurred = cv2.GaussianBlur(mask_float, (0, 0), sigmaX=radius / 3, sigmaY=radius / 3)
    return blurred


def photoshop_style_feather(image, mask, radius=150):
    """
    Feathering that approximates Photoshop's behaviour.

    Parameters:
        image: input image (numpy array)
        mask: selection mask (numpy array, values 0-255)
        radius: feather radius in pixels

    Returns:
        the feathered mask
    """
    # Make sure the mask is single-channel uint8
    if mask.dtype != np.uint8:
        mask = (mask * 255).astype(np.uint8)
    if len(mask.shape) > 2:
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

    # Gaussian kernel size (must be odd)
    kernel_size = max(3, int(2 * np.ceil(2 * radius) + 1))

    # Pad the mask to mimic Photoshop's border handling
    expanded_size = (mask.shape[0] + 2 * radius, mask.shape[1] + 2 * radius)
    expanded_mask = np.zeros(expanded_size, dtype=np.uint8)

    # Place the original mask at the centre of the padded one
    center_y, center_x = radius, radius
    expanded_mask[center_y:center_y + mask.shape[0], center_x:center_x + mask.shape[1]] = mask

    # Gaussian blur with reflective borders, similar to Photoshop
    blurred_expanded_mask = cv2.GaussianBlur(
        expanded_mask, (kernel_size, kernel_size),
        sigmaX=radius, sigmaY=radius,
        borderType=cv2.BORDER_REFLECT_101
    )

    # Crop the centre back out as the final mask
    feathered_mask = blurred_expanded_mask[center_y:center_y + mask.shape[0],
                                           center_x:center_x + mask.shape[1]]

    # Photoshop-like curve tweak on the mask
    feathered_mask = feathered_mask.astype(np.float32) / 255.0
    feathered_mask = np.power(feathered_mask, 1.1)  # slight contrast boost
    feathered_mask = np.clip(feathered_mask * 255, 0, 255).astype(np.uint8)

    return feathered_mask


def calculate_luminance(img):
    """Return the luminance channel (the Y of YCbCr), scaled to 0-1."""
    if len(img.shape) == 3:
        ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
        return ycrcb[:, :, 0].astype(np.float32) / 255.0
    else:
        return img.astype(np.float32) / 255.0
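# Illustrative sketch (not called anywhere in this script) of how the unused
# helpers match_colors() and apply_feathering() could be combined to pull a
# source image toward a reference image inside a soft near-white mask.
# The file names and tolerances below are placeholders, not project assets.
def _match_colors_sketch(source_path="source.jpg", reference_path="reference.jpg"):
    source = cv2.imread(source_path).astype(np.float32) / 255.0
    reference = cv2.imread(reference_path).astype(np.float32) / 255.0
    # Select near-white pixels with the simple Lab box mask defined above
    lab = rgb_to_lab_photoshop(cv2.cvtColor((source * 255).astype(np.uint8), cv2.COLOR_BGR2RGB))
    mask = create_mask_from_lab(lab, target_L=89.0, target_a=0.6, target_b=6.7,
                                tolerance_L=10, tolerance_a=10, tolerance_b=10)
    soft_mask = apply_feathering(mask * 255, radius=15)  # float mask in 0-1
    blended = match_colors(source, reference, soft_mask, strength=0.6)
    return np.clip(blended * 255, 0, 255).astype(np.uint8)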
def photoshop_feather_blend(adjusted_img, original_img, mask, feather_radius=150, brightness_factor=0.95):
    """
    Blend two images through a feathered mask, closer to Photoshop's result,
    while compensating for the brightness lift of a naive blend.

    Parameters:
        adjusted_img: adjusted image
        original_img: original image
        mask: selection mask (values 0-255)
        feather_radius: feather radius in pixels
        brightness_factor: brightness compensation factor (0.0-1.0);
            smaller values compensate more

    Returns:
        the blended image
    """
    if mask.dtype != np.uint8:
        mask = (mask * 255).astype(np.uint8)
    if len(mask.shape) > 2:
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

    # plt.figure(figsize=(10, 8))
    # plt.imshow(mask, cmap='gray')
    # plt.title("Raw Mask")
    # plt.axis('off')
    # plt.colorbar(label='Opacity')
    # plt.show()

    feathered_mask = photoshop_style_feather(original_img, mask, feather_radius)

    # plt.figure(figsize=(10, 8))
    # plt.imshow(feathered_mask, cmap='gray')
    # plt.title("Feathered Mask")
    # plt.axis('off')
    # plt.colorbar(label='Opacity')
    # plt.show()

    feathered_mask_float = feathered_mask.astype(np.float32) / 255.0

    # Expand the mask so it matches the number of image channels
    if len(original_img.shape) == 3 and len(feathered_mask_float.shape) == 2:
        feathered_mask_float = np.stack([feathered_mask_float] * 3, axis=-1)

    # Improved blending that avoids the brightness lift
    # 1. Convert the images to linear light (sRGB -> linear RGB)
    def to_linear(img):
        img_linear = img.astype(np.float32) / 255.0
        # piecewise sRGB decoding
        return np.where(img_linear <= 0.04045,
                        img_linear / 12.92,
                        ((img_linear + 0.055) / 1.055) ** 2.4)

    # 2. Convert linear RGB back to sRGB
    def to_srgb(img_linear):
        return np.where(img_linear <= 0.0031308,
                        img_linear * 12.92,
                        1.055 * (img_linear ** (1 / 2.4)) - 0.055)

    # 3. Bring both images into linear space
    adjusted_linear = to_linear(adjusted_img)
    original_linear = to_linear(original_img)

    # 4. Apply the brightness-correction factor
    luminance_adjustment = np.mean(original_linear, axis=-1, keepdims=True) * (1.0 - brightness_factor)
    adjusted_linear_corrected = adjusted_linear - luminance_adjustment

    # 5. Blend in linear space
    blended_linear = (adjusted_linear_corrected * feathered_mask_float +
                      original_linear * (1 - feathered_mask_float))

    # 6. Convert back to sRGB and to uint8
    blended_srgb = to_srgb(blended_linear)
    blended_img = np.clip(blended_srgb * 255, 0, 255).astype(np.uint8)

    return blended_img
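# Minimal usage sketch for photoshop_feather_blend() (illustration only; it is
# not called by the pipeline). It brightens a synthetic grey card and blends it
# back into the original through a feathered rectangular selection.
def _feather_blend_sketch():
    original = np.full((256, 256, 3), 128, dtype=np.uint8)
    adjusted = np.clip(original.astype(np.int16) + 40, 0, 255).astype(np.uint8)
    mask = np.zeros((256, 256), dtype=np.uint8)
    mask[64:192, 64:192] = 255  # hard rectangular selection
    return photoshop_feather_blend(adjusted, original, mask,
                                   feather_radius=15, brightness_factor=0.99)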
def rgb2lab_image(rgb_img):
    """Convert an RGB image to Photoshop-style Lab (D50 white point)."""
    rgb = rgb_img.astype(np.float32) / 255.0
    mask = rgb > 0.04045
    rgb = np.where(mask, np.power((rgb + 0.055) / 1.055, 2.4), rgb / 12.92)

    # Linear RGB -> XYZ (D50-adapted sRGB matrix)
    XYZ = np.dot(rgb, [
        [0.436052025, 0.222491598, 0.013929122],
        [0.385081593, 0.716886060, 0.097097002],
        [0.143087414, 0.060621486, 0.714185470]
    ])
    # Normalize by the D50 white point
    XYZ *= np.array([100.0, 100.0, 100.0]) / [96.4221, 100.0, 82.5211]

    epsilon = 0.008856
    kappa = 903.3
    XYZ_norm = np.where(XYZ > epsilon, np.power(XYZ, 1 / 3), (kappa * XYZ + 16) / 116)
    L = 116 * XYZ_norm[..., 1] - 16
    a = 500 * (XYZ_norm[..., 0] - XYZ_norm[..., 1])
    b = 200 * (XYZ_norm[..., 1] - XYZ_norm[..., 2])
    return np.stack([L, a, b], axis=-1)


def photoshop_lab_color_range_optimized(bgr_img, target_lab, tolerance=59, anti_alias=True):
    """Optimized Color Range-style selection that gives a more precise mask."""
    rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
    lab_img = rgb2lab_image(rgb_img)

    # Split the channels
    L, a, b = lab_img[:, :, 0], lab_img[:, :, 1], lab_img[:, :, 2]
    target_L, target_a, target_b = target_lab

    # Per-channel differences from the target color
    diff_L = np.abs(L - target_L)
    diff_a = np.abs(a - target_a)
    diff_b = np.abs(b - target_b)

    # Boost sensitivity in dark regions
    dark_boost = np.ones_like(L)
    dark_mask = L < 40           # only boost fairly dark areas
    dark_boost[dark_mask] = 1.2  # raise sensitivity there

    # Weighted difference: lower weight on lightness, higher weight on color
    weighted_diff = np.sqrt(
        0.25 * (diff_L / 100) ** 2 +
        0.75 * ((diff_a + diff_b) / 255) ** 2
    ) * 100

    # Apply the dark-region boost
    weighted_diff = weighted_diff / dark_boost

    # Convert the tolerance to a distance threshold
    threshold = 1.6 * (100 - tolerance) / 100 * 23

    # S-curve via tanh for a steeper, more precise transition
    normalized_diff = weighted_diff / threshold
    mask = 0.5 * (np.tanh(4 * (1 - normalized_diff)) + 1)

    # Anti-aliasing
    if anti_alias:
        mask = cv2.GaussianBlur(mask, (5, 5), 0)

    return mask


def photoshop_add_white(original_img):
    """Whiten the near-white regions of an in-memory BGR image and return the result."""
    # original_img = cv2.imread(input_path)
    target_lab = np.array([89.06, 0.59, 6.66], dtype=np.float32)
    tol = 85
    mask = photoshop_lab_color_range_optimized(original_img, target_lab, tol)

    # plt.figure(figsize=(10, 8))
    # plt.imshow(mask, cmap='gray')
    # plt.title("Color Range Mask")
    # plt.axis('off')
    # plt.colorbar(label='Opacity')
    # plt.show()

    mask_uint8 = (mask * 255).astype(np.uint8)

    # adjusted_img = adjust_levels_image(original_img)
    adjusted_img = adjust_color_balance(original_img,
                                        shadow_coefficients=[0, 0, 0],
                                        mid_tone_coefficients=[-20, 0, 5],
                                        highlight_coefficients=[0, 0, 12])
    # cv2.imwrite('/data/datasets_20t/fsdownload/image_color_timing/white_add_white/9999999999999.jpg', adjusted_img)

    # h, w = adjusted_img.shape[:2]
    # mask = cv2.resize(mask_uint8, (w, h))
    result = photoshop_feather_blend(adjusted_img, original_img, mask_uint8,
                                     feather_radius=15, brightness_factor=0.9999)
    # output_path = "/data/datasets_20t/Downloads_google/correct_show_obj_dream_tech/290082/cache/290082Tex1_o_white.jpg"
    return result
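# Usage sketch for the in-memory variant above (illustration only; the file
# names are placeholders, not project assets):
#
#     img = cv2.imread("texture.jpg")            # BGR, uint8
#     whitened = photoshop_add_white(img)
#     cv2.imwrite("texture_white.jpg", whitened)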
def photoshop_add_white3(input_path):
    """Same as photoshop_add_white, but reads the image from ``input_path`` and
    writes the result to a fixed output path."""
    original_img = cv2.imread(input_path)
    target_lab = np.array([89.06, 0.59, 6.66], dtype=np.float32)
    tol = 85
    mask = photoshop_lab_color_range_optimized(original_img, target_lab, tol)

    # plt.figure(figsize=(10, 8))
    # plt.imshow(mask, cmap='gray')
    # plt.title("Color Range Mask")
    # plt.axis('off')
    # plt.colorbar(label='Opacity')
    # plt.show()

    mask_uint8 = (mask * 255).astype(np.uint8)

    # adjusted_img = adjust_levels_image(original_img)
    adjusted_img = adjust_color_balance(original_img,
                                        shadow_coefficients=[0, 0, 0],
                                        mid_tone_coefficients=[-20, 0, 5],
                                        highlight_coefficients=[0, 0, 12])
    # cv2.imwrite('/data/datasets_20t/fsdownload/image_color_timing/white_add_white/9999999999999.jpg', adjusted_img)

    # h, w = adjusted_img.shape[:2]
    # mask = cv2.resize(mask_uint8, (w, h))
    result = photoshop_feather_blend(adjusted_img, original_img, mask_uint8,
                                     feather_radius=15, brightness_factor=0.99)
    output_path = "/data/datasets_20t/Downloads_google/correct_show_obj_dream_tech/290082/cache/290082Tex1_o_white.jpg"
    cv2.imwrite(output_path, result)
    return result


if __name__ == '__main__':
    start_time = time.time()

    # image_name = "858408_272249Tex1_4_220.jpg"
    # image_name_new = image_name.replace(".jpg", "_999999.jpg")
    # in_dir = "/data/datasets_20t/fsdownload/image_color_timing/output/"
    # out_dir = "/data/datasets_20t/fsdownload/image_color_timing/white_add_white/"
    # os.makedirs(out_dir, exist_ok=True)

    input_path = "/data/datasets_20t/Downloads_google/correct_show_obj_dream_tech/290082/cache/290082Tex1_o.jpg"
    photoshop_add_white3(input_path)
    print(f"Elapsed time: {time.time() - start_time:.2f} s")

"""
百位加白 (add white to the whites)
"""