4 changed files with 1232 additions and 12 deletions
@@ -0,0 +1,368 @@

import os.path

import numpy as np
from scipy.interpolate import CubicSpline
import cv2

import argparse
from ps_image_white_add_white_d import photoshop_add_white


def adjust_levels_image(img):
    """Photoshop-style levels adjustment: input levels 20-241, gamma 1.34, output levels 0-255."""
    img = img.astype(np.float32)
    img = 255 * ((img - 20) / (241 - 20))
    img[img < 0] = 0
    img[img > 255] = 255
    img = 255 * np.power(img / 255.0, 1.0 / 1.34)
    img = (img / 255) * (255 - 0) + 0
    img[img < 0] = 0
    img[img > 255] = 255
    img = img.astype(np.uint8)
    return img


def photoshop_style_feather(image, mask, radius=150):
    """Feather a selection mask in a way that approximates Photoshop's Feather command."""
    if mask.dtype != np.uint8:
        mask = (mask * 255).astype(np.uint8)
    if len(mask.shape) > 2:
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    kernel_size = max(3, int(2 * np.ceil(2 * radius) + 1))
    expanded_size = (mask.shape[0] + 2 * radius, mask.shape[1] + 2 * radius)
    expanded_mask = np.zeros(expanded_size, dtype=np.uint8)
    center_y, center_x = radius, radius
    expanded_mask[center_y:center_y + mask.shape[0],
                  center_x:center_x + mask.shape[1]] = mask

    blurred_expanded_mask = cv2.GaussianBlur(
        expanded_mask,
        (kernel_size, kernel_size),
        sigmaX=radius,
        sigmaY=radius,
        borderType=cv2.BORDER_REFLECT_101
    )

    feathered_mask = blurred_expanded_mask[center_y:center_y + mask.shape[0],
                                           center_x:center_x + mask.shape[1]]

    feathered_mask = feathered_mask.astype(np.float32) / 255.0
    feathered_mask = np.power(feathered_mask, 1.1)  # slightly increase contrast
    feathered_mask = np.clip(feathered_mask * 255, 0, 255).astype(np.uint8)

    return feathered_mask


def calculate_luminance(img):
    """Return the luminance channel of the image (the Y channel of YCrCb), normalised to 0-1."""
    if len(img.shape) == 3:
        ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
        return ycrcb[:, :, 0].astype(np.float32) / 255.0
    else:
        return img.astype(np.float32) / 255.0


def photoshop_feather_blend(adjusted_img, original_img, mask, feather_radius=150, brightness_factor=0.95):
    """Blend adjusted_img over original_img with a feathered mask, working in approximate linear light."""
    if mask.dtype != np.uint8:
        mask = (mask * 255).astype(np.uint8)
    if len(mask.shape) > 2:
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    # plt.figure(figsize=(10, 8))
    # plt.imshow(mask, cmap='gray')
    # plt.title("Feathered Mask")
    # plt.axis('off')
    # plt.colorbar(label='Opacity')
    # plt.show()

    feathered_mask = photoshop_style_feather(original_img, mask, feather_radius)
    # plt.figure(figsize=(10, 8))
    # plt.imshow(feathered_mask, cmap='gray')
    # plt.title("Feathered Mask")
    # plt.axis('off')
    # plt.colorbar(label='Opacity')
    # plt.show()
    feathered_mask_float = feathered_mask.astype(np.float32) / 255.0

    if len(original_img.shape) == 3 and len(feathered_mask_float.shape) == 2:
        feathered_mask_float = np.stack([feathered_mask_float] * 3, axis=-1)

    def to_linear(img):
        img_linear = img.astype(np.float32) / 255.0
        return np.where(img_linear <= 0.04045,
                        img_linear / 12.92,
                        ((img_linear + 0.055) / 1.055) ** 2.4)

    def to_srgb(img_linear):
        return np.where(img_linear <= 0.0031308,
                        img_linear * 12.92,
                        1.055 * (img_linear ** (1 / 2.4)) - 0.055)

    adjusted_linear = to_linear(adjusted_img)
    original_linear = to_linear(original_img)

    luminance_adjustment = np.mean(original_linear, axis=-1, keepdims=True) * (1.0 - brightness_factor)
    adjusted_linear_corrected = adjusted_linear - luminance_adjustment

    blended_linear = (adjusted_linear_corrected * feathered_mask_float +
                      original_linear * (1 - feathered_mask_float))

    blended_srgb = to_srgb(blended_linear)
    blended_img = np.clip(blended_srgb * 255, 0, 255).astype(np.uint8)

    return blended_img


def rgb2lab_image(rgb_img):
    """Convert an RGB image to Photoshop-style Lab."""
    rgb = rgb_img.astype(np.float32) / 255.0

    mask = rgb > 0.04045
    rgb = np.where(mask,
                   np.power((rgb + 0.055) / 1.055, 2.4),
                   rgb / 12.92)

    XYZ = np.dot(rgb, [
        [0.436052025, 0.222491598, 0.013929122],
        [0.385081593, 0.716886060, 0.097097002],
        [0.143087414, 0.060621486, 0.714185470]
    ])

    XYZ *= np.array([100.0, 100.0, 100.0]) / [96.4221, 100.0, 82.5211]

    epsilon = 0.008856
    kappa = 903.3

    XYZ_norm = np.where(XYZ > epsilon,
                        np.power(XYZ, 1 / 3),
                        (kappa * XYZ + 16) / 116)

    L = 116 * XYZ_norm[..., 1] - 16
    a = 500 * (XYZ_norm[..., 0] - XYZ_norm[..., 1])
    b = 200 * (XYZ_norm[..., 1] - XYZ_norm[..., 2])

    return np.stack([L, a, b], axis=-1)


def photoshop_lab_color_range_optimized(bgr_img, target_lab, tolerance=59, anti_alias=True):
    """Optimised colour-range selection in Lab space; returns a soft 0-1 mask."""

    rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
    lab_img = rgb2lab_image(rgb_img)

    L, a, b = lab_img[:, :, 0], lab_img[:, :, 1], lab_img[:, :, 2]
    target_L, target_a, target_b = target_lab

    diff_L = np.abs(L - target_L)
    diff_a = np.abs(a - target_a)
    diff_b = np.abs(b - target_b)

    dark_boost = np.ones_like(L)
    dark_mask = L < 40
    dark_boost[dark_mask] = 1.2

    weighted_diff = np.sqrt(
        0.25 * (diff_L / 100) ** 2 +
        0.75 * ((diff_a + diff_b) / 255) ** 2
    ) * 100

    weighted_diff = weighted_diff / dark_boost

    threshold = 1.6 * (100 - tolerance) / 100 * 23

    normalized_diff = weighted_diff / threshold
    mask = 0.5 * (np.tanh(4 * (1 - normalized_diff)) + 1)

    if anti_alias:
        mask = cv2.GaussianBlur(mask, (5, 5), 0)

    return mask
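
# Illustrative usage sketch (not part of the original action chain; the path below is
# hypothetical): build a soft 0-1 selection mask for the near-white target colour and
# convert it to uint8, which is how photoshop_add_white in ps_image_white_add_white_d
# consumes it.
#
#   demo_img = cv2.imread('/path/to/texture.jpg')
#   demo_mask = photoshop_lab_color_range_optimized(
#       demo_img, target_lab=np.array([89.06, 0.59, 6.66], dtype=np.float32), tolerance=85)
#   demo_mask_u8 = (demo_mask * 255).astype(np.uint8)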


def generate_curve_lut(x_points, y_points):
    """
    Build a 256-entry lookup table (LUT) from the given curve sample points.
    """
    cs = CubicSpline(x_points, y_points, bc_type='natural')
    x = np.arange(256)
    y = cs(x)
    y = np.clip(y, 0, 255).astype(np.uint8)
    return y


def apply_curve(img, lut):
    """
    Apply the curve LUT to every channel of the image (composite channel).
    """
    result = cv2.LUT(img, lut)
    return result
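
# Quick sanity check of the two LUT helpers above: the natural cubic spline passes exactly
# through the supplied sample points, so the darkening curve used by add_shadow_image below
# maps the mid-tone anchor 131 to 124 while leaving the end points in place.
#
#   demo_lut = generate_curve_lut([0, 131, 255], [0, 124, 255])
#   assert demo_lut[131] == 124          # sample points are hit exactly
#   darker = apply_curve(img, demo_lut)  # img: any uint8 BGR image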


def add_color_image(img):
    """Colour/saturation boost via a lifting curve (the "add colour" step of the action)."""
    # x_points = [0, 131, 255]
    # y_points = [0, 124, 255]
    x_points = [6, 184, 255]
    y_points = [0, 191, 255]
    lut = generate_curve_lut(x_points, y_points)
    adjusted = apply_curve(img, lut)

    return adjusted


def unsharp_mask(image, radius=5.0, amount=1.5, threshold=10):
    """
    Apply Unsharp Mask sharpening to an image.

    Parameters:
    - image: input image, must be 3-channel BGR
    - radius: Gaussian blur radius (standard deviation)
    - amount: sharpening strength
    - threshold: difference threshold; only regions above it are enhanced
    """
    if len(image.shape) != 3 or image.shape[2] != 3:
        raise ValueError("Input must be a 3-channel BGR image")
    if max(image.shape[:2]) > 20000:
        return unsharp_mask_blockwise(image, radius, amount, threshold)

    img_float = image.astype(np.float32) if image.dtype != np.float32 else image
    blurred = cv2.GaussianBlur(img_float, (0, 0), radius)
    diff = img_float - blurred
    mask = np.abs(diff) > threshold
    sharpened = img_float.copy()
    sharpened[mask] = img_float[mask] + diff[mask] * amount
    return np.clip(sharpened, 0, 255).astype(np.uint8)
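
# Illustrative call (these are the same parameter values photoshop_actions_emulation uses
# further down); the input here is a synthetic image, purely for demonstration:
#
#   demo = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
#   demo_sharp = unsharp_mask(demo, radius=2, amount=0.4, threshold=10)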


def unsharp_mask_blockwise(image, radius=5.0, amount=1.5, threshold=10, block_size=1024):
    """
    Run Unsharp Mask block by block for very large images, to avoid exhausting memory.
    """
    h, w = image.shape[:2]
    output = np.zeros_like(image)
    for y in range(0, h, block_size):
        for x in range(0, w, block_size):
            block = image[y:y + block_size, x:x + block_size]
            output[y:y + block_size, x:x + block_size] = unsharp_mask(block, radius, amount, threshold)
    return output


def add_shadow_image(img):
    """Darken the image (the "darken" step of the action) with a curve that pulls the mid-tones down."""
    x_points = [0, 131, 255]
    y_points = [0, 124, 255]

    lut = generate_curve_lut(x_points, y_points)
    adjusted = apply_curve(img, lut)

    return adjusted


def create_red_mask(img):
    """Extract red regions via the a channel of Lab space; returns an (h, w, 1) mask."""
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    a = lab[..., 1].astype(np.float32)
    red_score = np.clip((a - 128) / 50.0, 0, 1)  # a channel > 128 means reddish
    red_score = cv2.GaussianBlur(red_score, (9, 9), sigmaX=4)
    return red_score[..., np.newaxis]  # reshape to (h, w, 1)


def cmyk_to_rgb(cmyk):
    """CMYK -> RGB, a rough Photoshop-like approximation."""
    c, m, y, k = cmyk[..., 0], cmyk[..., 1], cmyk[..., 2], cmyk[..., 3]
    r = (1 - c) * (1 - k)
    g = (1 - m) * (1 - k)
    b = (1 - y) * (1 - k)
    return np.clip(np.stack([r, g, b], axis=-1) * 255, 0, 255)


def rgb_to_cmyk(rgb):
    """RGB -> CMYK, a rough Photoshop-like approximation."""
    r, g, b = rgb[..., 0] / 255.0, rgb[..., 1] / 255.0, rgb[..., 2] / 255.0
    k = 1 - np.maximum.reduce([r, g, b])
    k_safe = np.where(k == 1, 0.0, k)  # avoid dividing by zero where k == 1 (pure black)

    c = np.where(k == 1, 0, (1 - r - k) / (1 - k_safe))
    m = np.where(k == 1, 0, (1 - g - k) / (1 - k_safe))
    y = np.where(k == 1, 0, (1 - b - k) / (1 - k_safe))
    return np.stack([c, m, y, k], axis=-1)
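
# Round-trip sanity check for the two approximate conversions above: a pure red pixel
# maps to CMYK (0, 1, 1, 0) and back to (255, 0, 0).
#
#   demo_rgb = np.array([[[255.0, 0.0, 0.0]]])
#   demo_cmyk = rgb_to_cmyk(demo_rgb)   # -> [[[0., 1., 1., 0.]]]
#   demo_back = cmyk_to_rgb(demo_cmyk)  # -> [[[255., 0., 0.]]]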


def selective_color_adjustment(img, target_color, cmyk_adjustments, relative=True):
    if target_color != 'red':
        raise NotImplementedError("Only 'red' is currently supported")

    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32)
    mask = create_red_mask(img)

    cmyk = rgb_to_cmyk(img_rgb)

    # Apply the CMYK adjustments
    for channel, value in cmyk_adjustments.items():
        idx = {'cyan': 0, 'magenta': 1, 'yellow': 2, 'black': 3}.get(channel)
        if idx is None:
            continue
        original = cmyk[..., idx]
        v = value / 100.0

        if relative:
            # Non-linear relative adjustment, closer to the Photoshop curve (empirical formula)
            adjusted = original * (1 + v) ** 1.35  # the exponent is tunable
        else:
            adjusted = original + v

        cmyk[..., idx] = np.clip(adjusted, 0, 1)

    # Convert back to RGB and blend
    adjusted_rgb = cmyk_to_rgb(cmyk)
    output_rgb = img_rgb * (1 - mask) + adjusted_rgb * mask
    output_rgb = np.clip(output_rgb, 0, 255).astype(np.uint8)

    return cv2.cvtColor(output_rgb, cv2.COLOR_RGB2BGR)


def reduce_red_black_relative(img):
    """Emulate Photoshop: red -> black -8%, relative mode."""
    return selective_color_adjustment(
        img,
        target_color='red',
        cmyk_adjustments={'black': -8},
        relative=True
    )


def photoshop_actions_emulation(input_path, output_path):
    """Emulate the Photoshop action chain: darken twice, add colour, whiten the whites, then sharpen."""
    original_img = cv2.imread(input_path)
    # Darken
    shadow_image1 = add_shadow_image(original_img)
    shadow_image2 = add_shadow_image(shadow_image1)

    # output_down_path = output_path.replace(".jpg", "down.jpg")
    # cv2.imwrite(output_down_path, shadow_image2)

    original_img_color = add_color_image(shadow_image2)
    # output_color_path = output_path.replace(".jpg", "add_color.jpg")
    # cv2.imwrite(output_color_path, original_img_color)
    # Whiten the white point
    result_white_image = photoshop_add_white(original_img_color)
    # output_white_path = output_color_path.replace(".jpg", "white.jpg")
    # cv2.imwrite(output_white_path, result_white_image)
    # Sharpen
    result_usm = unsharp_mask(result_white_image, radius=2, amount=0.4, threshold=10)

    cv2.imwrite(output_path, result_usm)


if __name__ == '__main__':
    arg = argparse.ArgumentParser()
    arg.add_argument('--image_name', type=str, default='274351Tex1_adjusted060518_2_221.jpg')
    arg.add_argument('--image_name_new', type=str, default='274351Tex1_adjusted060518_2_221_999999.jpg')
    arg.add_argument('--in_dir', type=str, default='/data/datasets_20t/fsdownload/image_color_timing/output/')
    arg.add_argument('--out_dir', type=str, default='/data/datasets_20t/fsdownload/image_color_timing/shadow_up/')
    args = arg.parse_args()
    os.makedirs(args.out_dir, exist_ok=True)
    input_path = os.path.join(args.in_dir, args.image_name)
    output_path = os.path.join(args.out_dir, args.image_name_new)
    photoshop_actions_emulation(input_path, output_path)

@@ -0,0 +1,492 @@

import os.path
import numpy as np
from PIL import Image
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt
from skimage import io, color
from PIL import Image
import cv2
from skimage.exposure import match_histograms
from skimage.color import rgb2lab, lab2rgb
import time


def rgb_to_lab_photoshop(rgb):
    """
    Convert RGB to LAB using the conversion formulas Photoshop uses.
    """
    # Linearise RGB
    r, g, b = rgb[..., 0] / 255.0, rgb[..., 1] / 255.0, rgb[..., 2] / 255.0
    r = np.where(r <= 0.04045, r, ((r + 0.055) / 1.055) ** 2.4)
    g = np.where(g <= 0.04045, g, ((g + 0.055) / 1.055) ** 2.4)
    b = np.where(b <= 0.04045, b, ((b + 0.055) / 1.055) ** 2.4)

    x = r * 0.4124564 + g * 0.3575761 + b * 0.1804375
    y = r * 0.2126729 + g * 0.7151522 + b * 0.0721750
    z = r * 0.0193339 + g * 0.1191920 + b * 0.9503041

    # Normalise by the D65 white point
    x = x / 0.95047
    y = y / 1.00000
    z = z / 1.08883

    # XYZ to LAB conversion
    epsilon = 0.008856
    kappa = 903.3

    x = np.where(x > epsilon, x ** (1 / 3), (kappa * x + 16) / 116)
    y = np.where(y > epsilon, y ** (1 / 3), (kappa * y + 16) / 116)
    z = np.where(z > epsilon, z ** (1 / 3), (kappa * z + 16) / 116)

    l = 116 * y - 16
    a = 500 * (x - y)
    b = 200 * (y - z)

    return np.stack([l, a, b], axis=-1)
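
# Quick check of the conversion above: a pure white pixel linearises to (1, 1, 1),
# normalises to the D65 white point and therefore lands at roughly L=100, a=0, b=0.
#
#   white = np.array([[[255.0, 255.0, 255.0]]])
#   rgb_to_lab_photoshop(white)  # -> approximately [[[100., 0., 0.]]]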


def create_mask_from_lab(lab_image, target_L, target_a, target_b, tolerance_L=5, tolerance_a=5, tolerance_b=5):
    """
    Generate a mask from the given L, a, b value ranges.

    Parameters:
        lab_image: input LAB image
        target_L: target L value
        target_a: target a value
        target_b: target b value
        tolerance_L: tolerance range for L
        tolerance_a: tolerance range for a
        tolerance_b: tolerance range for b

    Returns:
        mask: the generated mask (1 where the condition holds, 0 otherwise)
    """

    mask = (
        (np.abs(lab_image[..., 0] - target_L) <= tolerance_L) &
        (np.abs(lab_image[..., 1] - target_a) <= tolerance_a) &
        (np.abs(lab_image[..., 2] - target_b) <= tolerance_b)
    ).astype(np.uint8)

    return mask


def filter_contours_by_enclosing_circle(mask, min_diameter=50):
    """
    Keep only the regions of a binary mask whose minimum enclosing circle is at least min_diameter pixels across.
    """
    cleaned_mask = np.zeros_like(mask)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    for contour in contours:
        # Minimum enclosing circle of the contour
        (x, y), radius = cv2.minEnclosingCircle(contour)
        diameter = 2 * radius

        # Check whether a circle of the requested diameter fits
        if diameter >= min_diameter:
            cv2.drawContours(cleaned_mask, [contour], -1, 255, thickness=cv2.FILLED)

    return cleaned_mask
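
# Illustrative check of the contour filter above: a blob whose enclosing circle is about
# 80 px across survives, while a ~20 px blob is dropped (min_diameter defaults to 50).
#
#   demo_mask = np.zeros((200, 200), dtype=np.uint8)
#   cv2.circle(demo_mask, (60, 60), 40, 255, -1)    # kept
#   cv2.circle(demo_mask, (150, 150), 10, 255, -1)  # removed
#   cleaned = filter_contours_by_enclosing_circle(demo_mask, min_diameter=50)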


# def adjust_levels_image(img):
#     """"""
#     img = img.astype(np.float32)
#     img = 255 * ((img - 20) / (241 - 20))
#     img[img < 0] = 0
#     img[img > 255] = 255
#     img = 255 * np.power(img / 255.0, 1.0 / 1.34)
#     img = (img / 255) * (255 - 0) + 0
#     img[img < 0] = 0
#     img[img > 255] = 255
#     img = img.astype(np.uint8)
#     return img


def normalize_coeffs(coeffs):
    median_val = sorted(coeffs)[1]
    return np.array([coef - median_val for coef in coeffs])


def img_bgr2rgb(img):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img.astype(np.float32) / 255.0
    img = img.copy()
    return img


def highlight_handle(img, highlight_coeffs):
    highlights_alpha = 0.003923 * normalize_coeffs(highlight_coeffs)

    a = np.diag(np.maximum(highlights_alpha, 0)) * np.eye(3)
    b = np.diag(-np.minimum(highlights_alpha, 0)) * (1 - np.eye(3))

    highlights_alpha = np.sum(a + b, axis=0)
    img = img / (1 - highlights_alpha.reshape(1, 1, 3))
    return img


def shadow_handle(img, shadows_coef):
    shadows_alpha = 0.003923 * normalize_coeffs(shadows_coef)

    a = np.diag(-np.minimum(shadows_alpha, 0)) * np.eye(3)
    b = np.diag(np.maximum(shadows_alpha, 0)) * (1 - np.eye(3))
    shadows_alpha = np.sum(a + b, axis=0)

    img = (img - shadows_alpha.reshape(1, 1, 3)) / (1 - shadows_alpha.reshape(1, 1, 3))
    return img


def mid_tone_handle(img, mid_tone_coeffs):
    mid_tone_alpha = -0.0033944 * normalize_coeffs(mid_tone_coeffs)

    f = np.diag(mid_tone_alpha) * (2 * np.eye(3) - 1)
    mid_tone_gamma = np.exp(np.sum(f, axis=0))

    img = np.power(img, mid_tone_gamma.reshape(1, 1, 3))
    return img


def img_rgb2bgr(img):
    img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX)
    img = img.astype(np.uint8)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    return img


def adjust_color_balance(image,
                         shadow_coefficients,
                         mid_tone_coefficients,
                         highlight_coefficients):
    """Photoshop-style colour balance built from shadow / mid-tone / highlight channel coefficients."""
    image = img_bgr2rgb(image)
    image = highlight_handle(image, highlight_coefficients)
    image = shadow_handle(image, shadow_coefficients)
    image = mid_tone_handle(image, mid_tone_coefficients)
    image = np.clip(image, 0, 1)
    image = img_rgb2bgr(image)
    cv2.imwrite('/data/datasets_20t/fsdownload/image_color_timing/white_add_white/9999999999999.jpg', image)  # debug dump of the intermediate result
    return image


def apply_gamut_mapping(values):
    """Gamut-mapping helper that keeps colours from overflowing."""
    # Compress the highlight range to prevent blow-out
    values = np.where(values > 0.9, 0.9 + (values - 0.9) * 0.5, values)
    # Lift the shadow range to prevent crushed blacks
    values = np.where(values < 0.1, 0.1 * (values / 0.1) ** 0.7, values)
    return np.clip(values, 0, 1)


# def match_colors(source, target, mask):
#     """
#     """
#     matched = match_histograms(source, target, channel_axis=-1)
#     mask_3ch = cv2.merge([mask] * 3) if len(mask.shape) == 2 else mask
#     return source * (1 - mask_3ch) + matched * mask_3ch

# def match_colors(source, target, mask, strength):
#     """
#     :param source: source image (H, W, 3)
#     :param target: target image (H, W, 3)
#     :param mask: mask (H, W) or (H, W, 3), values in 0~1
#     :param strength: colour-matching strength (0~1)
#     """
#     matched = match_histograms(source, target, channel_axis=-1)
#     mask_3ch = cv2.merge([mask] * 3) if len(mask.shape) == 2 else mask
#     mask_weighted = mask_3ch * strength  # control the strength
#     return source * (1 - mask_weighted) + matched * mask_weighted


def match_colors(source, target, mask, strength, brightness_scale=0.9):
    matched = match_histograms(source, target, channel_axis=-1)
    matched = matched * brightness_scale  # lower the brightness
    mask_3ch = cv2.merge([mask] * 3) if len(mask.shape) == 2 else mask
    mask_weighted = mask_3ch * strength
    return source * (1 - mask_weighted) + matched * mask_weighted
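
# Minimal usage sketch (hypothetical inputs; match_colors itself is not wired into the
# photoshop_add_white pipeline below): histogram-match `source` towards `target` inside a
# float 0-1 mask at half strength, then convert the blended result back to uint8.
#
#   mask01 = np.ones(source.shape[:2], dtype=np.float32)
#   blended = match_colors(source.astype(np.float32), target.astype(np.float32), mask01, strength=0.5)
#   blended_u8 = np.clip(blended, 0, 255).astype(np.uint8)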


def apply_feathering(mask, radius):
    """Simple feathering: normalise the mask and Gaussian-blur it with sigma = radius / 3."""
    mask_float = mask.astype(np.float32) / 255.0
    blurred = cv2.GaussianBlur(mask_float, (0, 0), sigmaX=radius / 3, sigmaY=radius / 3)
    return blurred


def photoshop_style_feather(image, mask, radius=150):
    """
    Feathering that comes close to the Photoshop result.

    Parameters:
        image: input image (numpy array)
        mask: selection mask (numpy array, values 0-255)
        radius: feather radius (pixels)

    Returns:
        the feathered mask
    """
    # Make sure the mask is uint8 and single channel
    if mask.dtype != np.uint8:
        mask = (mask * 255).astype(np.uint8)
    if len(mask.shape) > 2:
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

    # Gaussian kernel size (must be odd)
    kernel_size = max(3, int(2 * np.ceil(2 * radius) + 1))

    # Build an expanded mask to mimic Photoshop's border handling
    expanded_size = (mask.shape[0] + 2 * radius, mask.shape[1] + 2 * radius)
    expanded_mask = np.zeros(expanded_size, dtype=np.uint8)

    # Place the original mask in the centre of the expanded mask
    center_y, center_x = radius, radius
    expanded_mask[center_y:center_y + mask.shape[0],
                  center_x:center_x + mask.shape[1]] = mask

    # Apply the Gaussian blur (reflected borders, similar to Photoshop)
    blurred_expanded_mask = cv2.GaussianBlur(
        expanded_mask,
        (kernel_size, kernel_size),
        sigmaX=radius,
        sigmaY=radius,
        borderType=cv2.BORDER_REFLECT_101
    )

    # Crop the centre back out as the final mask
    feathered_mask = blurred_expanded_mask[center_y:center_y + mask.shape[0],
                                           center_x:center_x + mask.shape[1]]

    # Apply the Photoshop-like mask curve adjustment
    feathered_mask = feathered_mask.astype(np.float32) / 255.0
    feathered_mask = np.power(feathered_mask, 1.1)  # slightly increase contrast
    feathered_mask = np.clip(feathered_mask * 255, 0, 255).astype(np.uint8)

    return feathered_mask


def calculate_luminance(img):
    """Return the luminance channel of the image (the Y channel of YCrCb)."""
    if len(img.shape) == 3:
        ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
        return ycrcb[:, :, 0].astype(np.float32) / 255.0
    else:
        return img.astype(np.float32) / 255.0


def photoshop_feather_blend(adjusted_img, original_img, mask, feather_radius=150, brightness_factor=0.95):
    """
    Blend two images with an improved feathered mask, closer to the Photoshop result,
    and fix the over-brightening problem.

    Parameters:
        adjusted_img: the adjusted image
        original_img: the original image
        mask: selection mask (range 0-255)
        feather_radius: feather radius
        brightness_factor: brightness compensation factor (0.0-1.0); smaller values compensate more

    Returns:
        the blended image
    """
    if mask.dtype != np.uint8:
        mask = (mask * 255).astype(np.uint8)
    if len(mask.shape) > 2:
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    # plt.figure(figsize=(10, 8))
    # plt.imshow(mask, cmap='gray')
    # plt.title("Feathered Mask")
    # plt.axis('off')
    # plt.colorbar(label='Opacity')
    # plt.show()

    feathered_mask = photoshop_style_feather(original_img, mask, feather_radius)
    # plt.figure(figsize=(10, 8))
    # plt.imshow(feathered_mask, cmap='gray')
    # plt.title("Feathered Mask")
    # plt.axis('off')
    # plt.colorbar(label='Opacity')
    # plt.show()
    feathered_mask_float = feathered_mask.astype(np.float32) / 255.0

    # Expand the mask dimensions to match the image channel count
    if len(original_img.shape) == 3 and len(feathered_mask_float.shape) == 2:
        feathered_mask_float = np.stack([feathered_mask_float] * 3, axis=-1)

    # Improved blending that fixes the brightness lift
    # 1. Convert the images to linear space (approximate sRGB -> linear RGB)
    def to_linear(img):
        img_linear = img.astype(np.float32) / 255.0
        # Simple gamma-correction approximation
        return np.where(img_linear <= 0.04045,
                        img_linear / 12.92,
                        ((img_linear + 0.055) / 1.055) ** 2.4)

    # 2. Convert back to sRGB space
    def to_srgb(img_linear):
        return np.where(img_linear <= 0.0031308,
                        img_linear * 12.92,
                        1.055 * (img_linear ** (1 / 2.4)) - 0.055)

    # 3. Work in linear space
    adjusted_linear = to_linear(adjusted_img)
    original_linear = to_linear(original_img)

    # 4. Apply the brightness correction factor
    luminance_adjustment = np.mean(original_linear, axis=-1, keepdims=True) * (1.0 - brightness_factor)
    adjusted_linear_corrected = adjusted_linear - luminance_adjustment

    # 5. Blend in linear space
    blended_linear = (adjusted_linear_corrected * feathered_mask_float +
                      original_linear * (1 - feathered_mask_float))

    # 6. Convert back to sRGB and to uint8
    blended_srgb = to_srgb(blended_linear)
    blended_img = np.clip(blended_srgb * 255, 0, 255).astype(np.uint8)

    return blended_img


def rgb2lab_image(rgb_img):
    """Convert an RGB image to Photoshop-style Lab."""
    rgb = rgb_img.astype(np.float32) / 255.0

    mask = rgb > 0.04045
    rgb = np.where(mask,
                   np.power((rgb + 0.055) / 1.055, 2.4),
                   rgb / 12.92)

    XYZ = np.dot(rgb, [
        [0.436052025, 0.222491598, 0.013929122],
        [0.385081593, 0.716886060, 0.097097002],
        [0.143087414, 0.060621486, 0.714185470]
    ])

    XYZ *= np.array([100.0, 100.0, 100.0]) / [96.4221, 100.0, 82.5211]

    epsilon = 0.008856
    kappa = 903.3

    XYZ_norm = np.where(XYZ > epsilon,
                        np.power(XYZ, 1 / 3),
                        (kappa * XYZ + 16) / 116)

    L = 116 * XYZ_norm[..., 1] - 16
    a = 500 * (XYZ_norm[..., 0] - XYZ_norm[..., 1])
    b = 200 * (XYZ_norm[..., 1] - XYZ_norm[..., 2])

    return np.stack([L, a, b], axis=-1)


def photoshop_lab_color_range_optimized(bgr_img, target_lab, tolerance=59, anti_alias=True):
    """Optimised colour-range selection that gives a more precise selection."""

    rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
    lab_img = rgb2lab_image(rgb_img)

    # Split the channels
    L, a, b = lab_img[:, :, 0], lab_img[:, :, 1], lab_img[:, :, 2]
    target_L, target_a, target_b = target_lab

    # Per-channel differences
    diff_L = np.abs(L - target_L)
    diff_a = np.abs(a - target_a)
    diff_b = np.abs(b - target_b)

    # Boost for dark regions
    dark_boost = np.ones_like(L)
    dark_mask = L < 40  # only boost fairly dark regions
    dark_boost[dark_mask] = 1.2  # raise the sensitivity of dark regions

    # Improved weighted difference with the dark-region boost
    weighted_diff = np.sqrt(
        0.25 * (diff_L / 100) ** 2 +            # further reduce the luminance weight
        0.75 * ((diff_a + diff_b) / 255) ** 2   # further increase the colour weight
    ) * 100

    # Apply the dark-region boost
    weighted_diff = weighted_diff / dark_boost

    # Optimised tolerance-to-threshold conversion
    threshold = 1.6 * (100 - tolerance) / 100 * 23

    # A sharper S-curve: tanh gives a steeper transition
    normalized_diff = weighted_diff / threshold
    mask = 0.5 * (np.tanh(4 * (1 - normalized_diff)) + 1)

    # Anti-aliasing
    if anti_alias:
        mask = cv2.GaussianBlur(mask, (5, 5), 0)

    return mask


def photoshop_add_white(original_img):
    """Whiten the near-white regions ("white point add white") of a BGR image and return the result."""
    # original_img = cv2.imread(input_path)
    target_lab = np.array([89.06, 0.59, 6.66], dtype=np.float32)
    tol = 85
    mask = photoshop_lab_color_range_optimized(original_img, target_lab, tol)
    # plt.figure(figsize=(10, 8))
    # plt.imshow(mask, cmap='gray')
    # plt.title("Feathered Mask")
    # plt.axis('off')
    # plt.colorbar(label='Opacity')
    # plt.show()
    mask_uint8 = (mask * 255).astype(np.uint8)

    # adjusted_img = adjust_levels_image(original_img)
    adjusted_img = adjust_color_balance(original_img,
                                        shadow_coefficients=[0, 0, 0],
                                        mid_tone_coefficients=[-20, 0, 5],
                                        highlight_coefficients=[0, 0, 12])

    # cv2.imwrite('/data/datasets_20t/fsdownload/image_color_timing/white_add_white/9999999999999.jpg', adjusted_img)
    # h, w = adjusted_img.shape[:2]
    # mask = cv2.resize(mask_uint8, (w, h))
    result = photoshop_feather_blend(adjusted_img, original_img, mask_uint8,
                                     feather_radius=15, brightness_factor=0.9999)
    # output_path = "/data/datasets_20t/Downloads_google/correct_show_obj_dream_tech/290082/cache/290082Tex1_o_white.jpg"

    return result


def photoshop_add_white3(input_path):
    """Standalone variant: read an image from input_path, whiten the whites, and write a debug copy to disk."""
    original_img = cv2.imread(input_path)
    target_lab = np.array([89.06, 0.59, 6.66], dtype=np.float32)
    tol = 85
    mask = photoshop_lab_color_range_optimized(original_img, target_lab, tol)
    # plt.figure(figsize=(10, 8))
    # plt.imshow(mask, cmap='gray')
    # plt.title("Feathered Mask")
    # plt.axis('off')
    # plt.colorbar(label='Opacity')
    # plt.show()
    mask_uint8 = (mask * 255).astype(np.uint8)

    # adjusted_img = adjust_levels_image(original_img)
    adjusted_img = adjust_color_balance(original_img,
                                        shadow_coefficients=[0, 0, 0],
                                        mid_tone_coefficients=[-20, 0, 5],
                                        highlight_coefficients=[0, 0, 12])

    # cv2.imwrite('/data/datasets_20t/fsdownload/image_color_timing/white_add_white/9999999999999.jpg', adjusted_img)
    # h, w = adjusted_img.shape[:2]
    # mask = cv2.resize(mask_uint8, (w, h))
    result = photoshop_feather_blend(adjusted_img, original_img, mask_uint8,
                                     feather_radius=15, brightness_factor=0.99)
    output_path = "/data/datasets_20t/Downloads_google/correct_show_obj_dream_tech/290082/cache/290082Tex1_o_white.jpg"
    cv2.imwrite(output_path, result)
    return result


if __name__ == '__main__':
    start_time = time.time()
    # image_name = "858408_272249Tex1_4_220.jpg"
    # image_name_new = image_name.replace(".jpg", "_999999.jpg")
    # in_dir = "/data/datasets_20t/fsdownload/image_color_timing/output/"
    # out_dir = "/data/datasets_20t/fsdownload/image_color_timing/white_add_white/"
    # os.makedirs(out_dir, exist_ok=True)
    input_path = "/data/datasets_20t/Downloads_google/correct_show_obj_dream_tech/290082/cache/290082Tex1_o.jpg"

    photoshop_add_white3(input_path)
    print(f"Program run time: {time.time() - start_time:.2f} s")
    """
    Whiten the whites (white-point add white)
    """

@@ -0,0 +1,354 @@

import os.path
import shutil
import time
import argparse
import cv2
import numpy as np
from scipy.interpolate import CubicSpline
import sys, os
from PIL import Image, ImageEnhance
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from ps_image_shadow_up_ag_two_d import photoshop_actions_emulation


def smootherstep(x):
    """Quintic smoothstep interpolation: an even smoother transition."""
    return x**3 * (x * (x * 6 - 15) + 10)
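
# Quick sanity check of the quintic smoothstep above (values follow from the formula):
#
#   >>> smootherstep(np.array([0.0, 0.5, 1.0]))
#   array([0. , 0.5, 1. ])
#
# It has zero first and second derivatives at both ends, which is what lets the
# suppression weight below fade in without a visible step.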


def perceptual_smooth_adjustment_color_blend(img, threshold=220, reduction=0.5, margin=10, saturation_sensitivity=0.3, blur_radius=5, color_blend_strength=0.5):
    """
    Smoother, colour-blending perceptual brightness suppression.

    - threshold: brightness (V channel) at which suppression starts
    - reduction: suppression strength (0-1)
    - margin: transition band around the threshold (difference in pixel brightness)
    - saturation_sensitivity: weaken the suppression where saturation is high
    - blur_radius: blur radius used for the colour blending
    - color_blend_strength: amount of colour blending (0~1)
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)

    v = v.astype(np.float32)
    s = s.astype(np.float32)

    # Saturation-aware weakening of the suppression
    sat_weight = 1.0 - (s / 255.0 * saturation_sensitivity)
    sat_weight = np.clip(sat_weight, 0.0, 1.0)

    # Smooth suppression weight
    delta = v - threshold
    transition = np.zeros_like(v, dtype=np.float32)

    in_range = (delta > 0) & (delta < margin)
    transition[in_range] = smootherstep(delta[in_range] / margin)
    transition[delta >= margin] = 1.0

    # Combine the suppression weights
    weight = reduction * transition * sat_weight

    # Apply the suppression
    v_new = v - (v - threshold) * weight
    v_new = np.clip(v_new, 0, 255).astype(np.uint8)

    # Recompose the suppressed image
    adjusted_hsv = cv2.merge([h, s.astype(np.uint8), v_new])
    adjusted = cv2.cvtColor(adjusted_hsv, cv2.COLOR_HSV2BGR)

    # -------------------
    # Blend in a blurred copy of the original to soften abrupt colour changes
    # -------------------
    blurred = cv2.GaussianBlur(img, (blur_radius | 1, blur_radius | 1), 0)

    # Build the blend-weight mask; it only acts in the transition region
    color_blend_mask = np.clip(weight, 0, 1) * color_blend_strength
    color_blend_mask = color_blend_mask[..., None]  # expand to (H, W, 1) for per-channel blending

    # Mix the blurred image into the blended region
    final = adjusted.astype(np.float32) * (1 - color_blend_mask) + blurred.astype(np.float32) * color_blend_mask
    final = np.clip(final, 0, 255).astype(np.uint8)

    return final
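
# Illustrative usage sketch (hypothetical paths); these are the same parameter values that
# process_image() below passes in by default:
#
#   demo = cv2.imread('/path/to/texture.jpg')
#   softened = perceptual_smooth_adjustment_color_blend(demo, threshold=210, reduction=0.6)
#   cv2.imwrite('/path/to/texture_soft.jpg', softened)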


def process_image(input_path, output_path, threshold=210, reduction=0.6):
    """
    Read an image, suppress its highlights, and write the result to output_path.
    """
    try:
        img = cv2.imread(input_path)
        if img is None:
            raise ValueError("Could not read the image; please check that the path is correct")

        # result = perceptual_adjustment(img, threshold, reduction)
        result = perceptual_smooth_adjustment_color_blend(img, threshold, reduction)

        cv2.imwrite(output_path, result)
        print(f"Processed successfully; result saved to: {output_path}")

        return True

    except Exception as e:
        print(f"Processing failed: {str(e)}")
        return False


def sigmoid(x, center=0.0, slope=10.0):
    return 1 / (1 + np.exp(-slope * (x - center)))


def reduce_highlights_lab_advanced_hsvmask(
        img,
        highlight_thresh=220,
        strength=30,
        sigma=15,
        detail_boost=1.0,
        preserve_local_contrast=True
):
    """
    LAB highlight suppression + HSV perceptual mask + detail preservation.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    V = hsv[:, :, 2].astype(np.float32)

    # 1. Build the highlight mask with a smooth transition
    mask = sigmoid(V, center=highlight_thresh, slope=0.05)
    mask = np.clip(mask, 0, 1)
    mask = cv2.GaussianBlur(mask, (0, 0), sigmaX=2)

    mask_vis = (mask * 255).astype(np.uint8)

    # 2. Luminance suppression in LAB space
    img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    L, a, b = cv2.split(img_lab)
    L = L.astype(np.float32)

    # 3. Blur and detail layers
    L_blur = cv2.GaussianBlur(L, (0, 0), sigma)
    L_detail = L - L_blur

    # 4. Alternative approach: suppress L, but blend more gently
    L_target = L_blur - strength * mask
    L_target = np.clip(L_target, 0, 255)

    if preserve_local_contrast:
        # Keep the detail and local contrast (avoid over-smoothing)
        L_new = L_target + detail_boost * L_detail
    else:
        # Plain luminance suppression
        L_new = L_target

    L_new = np.clip(L_new, 0, 255).astype(np.uint8)

    # 5. Recompose
    lab_new = cv2.merge([L_new, a, b])
    result = cv2.cvtColor(lab_new, cv2.COLOR_Lab2BGR)

    return result, mask_vis


def suppress_highlights_keep_texture(image_bgr, v_thresh=225, target_v=215, sigma=1):
    """Compress bright highlights towards target_v while keeping the high-frequency texture."""

    image_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(image_hsv)
    v = v.astype(np.float32)

    v_blur = cv2.GaussianBlur(v, (0, 0), sigmaX=sigma)
    detail = v - v_blur

    # Build a soft mask (0~1) for adaptive suppression
    mask = (v_blur > v_thresh).astype(np.float32)
    # the larger the weight, the stronger the suppression
    weight = np.clip((v_blur - v_thresh) / 20.0, 0, 1) * mask  # 20 is the suppression bandwidth
    # weight = weight * 1.2
    # Linear mix that pulls the brightness towards target_v:
    v_compress = v_blur * (1 - weight) + target_v * weight

    v_new = v_compress + detail
    v_new = np.clip(v_new, 0, 255).astype(np.uint8)

    hsv_new = cv2.merge([h, s, v_new])
    result_bgr = cv2.cvtColor(hsv_new, cv2.COLOR_HSV2BGR)

    return result_bgr


def correct_light_again_hsv(image_path):
    img = cv2.imread(image_path)
    result, mask_vis = reduce_highlights_lab_advanced_hsvmask(
        img,
        highlight_thresh=225,
        strength=15,
        sigma=10,
        detail_boost=1.2
    )
    result_bgr = suppress_highlights_keep_texture(result)
    output_image_path = image_path.replace(".jpg", "_light02.jpg")
    cv2.imwrite(
        output_image_path,
        result_bgr
    )
    return output_image_path


def generate_curve_lut(x_points, y_points):
    """
    Build a 256-entry lookup table (LUT) from the given curve sample points.
    """
    cs = CubicSpline(x_points, y_points, bc_type='natural')
    x = np.arange(256)
    y = cs(x)
    y = np.clip(y, 0, 255).astype(np.uint8)
    return y


def apply_curve(img, lut):
    """
    Apply the curve LUT to every channel of the image (composite channel).
    """
    result = cv2.LUT(img, lut)
    return result


def apply_curve_up_image(image_path, image_cache_dir):
    """Brighten: apply a lifting curve twice when the image is dark (mean V below 60)."""
    x_points = [0, 124, 255]
    y_points = [0, 131, 255]
    lut = generate_curve_lut(x_points, y_points)
    # adjusted = apply_curve(img, lut)

    image_name_result = image_path.split("/")[-1].replace(".jpg", "_up.jpg")
    result_path = os.path.join(image_cache_dir, image_name_result)
    image_bgr = cv2.imread(image_path)
    image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
    image_hsv = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2HSV).astype(np.float32)
    h, s, v = cv2.split(image_hsv)
    v_mean = np.mean(v)
    print(f"v_mean {v_mean}")
    if v_mean < 60:
        adjusted = apply_curve(image_bgr, lut)
        adjusted2 = apply_curve(adjusted, lut)
        cv2.imwrite(result_path, adjusted2)
        return result_path

    else:
        image_name_result = image_path.split("/")[-1].replace(".jpg", "_o.jpg")
        result_original_path = os.path.join(image_cache_dir, image_name_result)
        shutil.copy(image_path, result_original_path)
        return result_original_path


def apply_curve_down_image(image_path, image_cache_dir):
    """Darken: apply a lowering curve twice when the image is bright (mean V above 110)."""
    x_points = [0, 131, 255]
    y_points = [0, 124, 255]
    lut = generate_curve_lut(x_points, y_points)
    # adjusted = apply_curve(img, lut)
    image_name_result = image_path.split("/")[-1].replace(".jpg", "_down.jpg")
    result_path = os.path.join(image_cache_dir, image_name_result)
    image_bgr = cv2.imread(image_path)
    image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
    image_hsv = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2HSV).astype(np.float32)
    h, s, v = cv2.split(image_hsv)

    v_mean = np.mean(v)
    print(f"v_mean {v_mean}")
    if v_mean > 110:
        adjusted = apply_curve(image_bgr, lut)
        adjusted2 = apply_curve(adjusted, lut)
        cv2.imwrite(result_path, adjusted2)
        return result_path
    else:
        image_name_result = image_path.split("/")[-1].replace(".jpg", "_o.jpg")
        result_original_path = os.path.join(image_cache_dir, image_name_result)
        shutil.copy(image_path, result_original_path)
        return result_original_path


def sharpen_image(image_path, output_path):
    """
    Sharpening routine with the colour problem fixed.
    """
    # 1. Read the image and make sure we work in RGB
    image = cv2.imread(image_path)
    if image is None:
        raise ValueError("Could not read the image; please check that the path is correct")

    # 2. Convert to RGB and stay consistent from here on
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # 3. No further conversion is needed while processing with PIL
    pil_img = Image.fromarray(rgb_image)

    # 3.1 Sharpen
    enhancer = ImageEnhance.Sharpness(pil_img)
    sharpened = enhancer.enhance(2.0)

    # 3.2 Boost contrast
    contrast_enhancer = ImageEnhance.Contrast(sharpened)
    final_image = contrast_enhancer.enhance(1.2)

    # 4. Back to a numpy array (still RGB)
    cv_image = np.array(final_image)

    # 5. Unsharp masking that avoids the colour problem:
    #    split the channels, process each one separately, then merge again
    r, g, b = cv2.split(cv_image)

    def unsharp_channel(channel):
        blurred = cv2.GaussianBlur(channel, (0, 0), 3)
        return cv2.addWeighted(channel, 1.5, blurred, -0.5, 0)

    r_sharp = unsharp_channel(r)
    g_sharp = unsharp_channel(g)
    b_sharp = unsharp_channel(b)

    # Merge the channels back in their original RGB order
    sharpened_cv = cv2.merge([r_sharp, g_sharp, b_sharp])

    # 6. Save the result (converted back to BGR for cv2.imwrite)
    cv2.imwrite(output_path, cv2.cvtColor(sharpened_cv, cv2.COLOR_RGB2BGR))

def correct_texture_image(input_path, image_result_dir, output_path):
    """Full texture-correction pipeline: brighten/darken as needed, run the Photoshop action emulation, then copy the result."""

    image_cache_dir = os.path.join(image_result_dir, "cache")
    os.makedirs(image_cache_dir, exist_ok=True)
    input_path_cure_up = apply_curve_up_image(input_path, image_cache_dir)

    input_path_cure_down_result = apply_curve_down_image(input_path_cure_up, image_cache_dir)

    print("input_path_correct", input_path_cure_down_result)
    shadow_up_path = input_path_cure_down_result.replace(".jpg", "_shadow_shadow_add_color_white_unsharp.jpg")
    photoshop_actions_emulation(input_path_cure_down_result, shadow_up_path)

    shutil.copy(shadow_up_path, output_path)
    time.sleep(1)
    try:
        shutil.rmtree(image_cache_dir)
    except Exception:
        print("Failed to delete the cache directory")

    return shadow_up_path

if __name__ == "__main__":

    arg = argparse.ArgumentParser()
    arg.add_argument('--input_path', type=str, default="")
    arg.add_argument('--output_path', type=str, default="")
    args = arg.parse_args()
    image_result_dir = os.path.dirname(args.output_path)
    os.makedirs(image_result_dir, exist_ok=True)

    start_time = time.time()
    correct_texture_image(args.input_path, image_result_dir, args.output_path)
    end_time = time.time()
    total_time = round(end_time - start_time, 2)
    """
    DreamTech: Photoshop action F7 twice + Shift F7 once.
    F7: darken x2
    Shift F7: figurine x1
    Figurine: darken, darken, boost saturation/colour, whiten the whites, sharpen
    """