import os, time, oss2, cv2
from alibabacloud_facebody20191230.client import Client
from alibabacloud_facebody20191230.models import DetectFaceRequest
from alibabacloud_tea_openapi.models import Config
from alibabacloud_tea_util.models import RuntimeOptions


def detect_face_feature_points(url):
    try:
        detect_face_request = DetectFaceRequest(
            image_url=url,
            landmark=True,
            quality=True,
            max_face_number=5,
            pose=True
        )
        runtime = RuntimeOptions()
        result = facebody_client.detect_face_with_options(detect_face_request, runtime)
        return result.body
    except Exception as e:
        print(e)
        return None


def main():
    # Define the key facial feature points, to cut down the computation and make
    # the jitter detection more robust
    # https://help.aliyun.com/document_detail/151969.html?spm=a2c4g.155519.0.0.742d7d63kadwAt
    # feature_points = ['chin', 'left_eyebrow', 'right_eyebrow', 'nose_bridge', 'nose_tip', 'left_eye', 'right_eye', 'top_lip', 'bottom_lip']
    feature_points = [29, 36, 45, 52, 59, 66, 75, 98]

    # Define the image resize style applied on OSS
    # https://help.aliyun.com/document_detail/44688.html?spm=a2c4g.11186623.6.1001.6b3c4c3f3ZQZ7z
    # https://oss-console-img-demo-cn-hangzhou.oss-cn-hangzhou.aliyuncs.com/example.jpg?x-oss-process=image/resize,p_50
    # Use Python to get signed URLs for the images on OSS
    # https://help.aliyun.com/document_detail/31947.html?spm=a2c4g.11186623.6.1003.6b3c4c3f3ZQZ7z
    style = 'image/resize,p_50'

    # Loop over the 2 photos of the same capture (pid) on OSS and get a resized,
    # signed URL for each
    prefix = 'zhengliang/photos/photos_1000/'
    filelist = oss2.ObjectIteratorV2(bucket_client, prefix=prefix, delimiter='/')
    # for file in filelist:
    #     pid = file.key.split('/')[-2]
    #     url1 = f'zhengliang/photos/photos_1000/{pid}/photo1/103_1.jpg'
    #     url2 = f'zhengliang/photos/photos_1000/{pid}/photo2/103_8.jpg'
    #     sign_url1 = bucket_client.sign_url('GET', url1, 3600, params={'x-oss-process': style})
    #     sign_url2 = bucket_client.sign_url('GET', url2, 3600, params={'x-oss-process': style})
    pid = '45610'
    url1 = f'zhengliang/photos/photos_1000/{pid}/photo1/103_1.jpg'
    url2 = f'zhengliang/photos/photos_1000/{pid}/photo2/103_8.jpg'
    sign_url1 = bucket_client.sign_url('GET', url1, 3600, params={'x-oss-process': style})
    sign_url2 = bucket_client.sign_url('GET', url2, 3600, params={'x-oss-process': style})

    result1 = detect_face_feature_points(sign_url1)
    photo1_face_count = result1.data.face_count
    photo1_feature_points = result1.data.landmarks
    result2 = detect_face_feature_points(sign_url2)
    photo2_face_count = result2.data.face_count
    photo2_feature_points = result2.data.landmarks

    # Convert the flat landmark list (105 points per face, x/y interleaved) into a
    # dict: face index -> {landmark index: {'x': ..., 'y': ...}}
    print(photo1_feature_points)
    photo1_feature_points_dict = {}
    for face_i in range(photo1_face_count):
        points = {}
        offset = face_i * face_feature_points_count * 2
        for index in range(face_feature_points_count):
            points[index] = {'x': photo1_feature_points[offset + index * 2],
                             'y': photo1_feature_points[offset + index * 2 + 1]}
        photo1_feature_points_dict[face_i] = points
    print(photo1_feature_points_dict)

    # The drawing code below handles the single-face case and expects flat dicts
    # keyed by landmark index, so rebuild them here (this overwrites the per-face
    # dict above)
    photo1_feature_points_dict = {}
    for index in range(len(photo1_feature_points) // 2):
        photo1_feature_points_dict[index] = {'x': photo1_feature_points[index * 2],
                                             'y': photo1_feature_points[index * 2 + 1]}
    photo2_feature_points_dict = {}
    for index in range(len(photo2_feature_points) // 2):
        photo2_feature_points_dict[index] = {'x': photo2_feature_points[index * 2],
                                             'y': photo2_feature_points[index * 2 + 1]}

    localphoto1 = f'/data/datasets/photos/{pid}/photo1/103_1.jpg'
    localphoto2 = f'/data/datasets/photos/{pid}/photo2/103_8.jpg'
    # Use OpenCV to draw the feature point positions on the local photos
    photo1 = cv2.imread(localphoto1)
    photo2 = cv2.imread(localphoto2)
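    # Note (assumption): the landmarks above were detected on the 50%-resized OSS
    # rendition (image/resize,p_50), while the local photos are presumably the
    # full-size originals. If so, the coordinates would need to be scaled back up
    # before drawing; a minimal sketch, assuming a fixed 2x factor:
    # coord_scale = 2
    # for pts in (photo1_feature_points_dict, photo2_feature_points_dict):
    #     for p in pts.values():
    #         p['x'] *= coord_scale
    #         p['y'] *= coord_scale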
    for feature_point in feature_points:
        p1 = photo1_feature_points_dict[feature_point]
        p2 = photo2_feature_points_dict[feature_point]
        # cv2.circle needs integer pixel coordinates; the API returns floats
        cv2.circle(photo1, (int(p1['x']), int(p1['y'])), 2, (0, 0, 255), -1)
        cv2.circle(photo2, (int(p2['x']), int(p2['y'])), 2, (0, 0, 255), -1)
    cv2.imwrite(f'/data/datasets/photos/{pid}/photo1/103_1.point.jpg', photo1)
    cv2.imwrite(f'/data/datasets/photos/{pid}/photo2/103_8.point.jpg', photo2)

    # Use Alibaba Cloud's 105-point face landmark detection API to get the face
    # feature points of both photos
    # https://help.aliyun.com/document_detail/155645.html?spm=a2c4g.11186623.6.1002.6b3c4c3f3ZQZ7z
    # Filter the key feature points out of the landmark data and compute, per face,
    # the coordinate differences of the key points between the 2 photos to decide
    # whether the capture jittered
    # Save the per-face key point distances of the two photos of each capture to the
    # database for later analysis; each record stores pid (the capture id), face_id,
    # feature_point, distance, max_distance, min_distance, avg_distance,
    # jitter_threshold and jitter_flag (0 = no jitter, 1 = jitter)
    # Draw the key face points on original photo 1 and overlay it on original photo 2
    # to produce a semi-transparent composite for visually checking the jitter
    # Use a Streamlit chart to plot how the key point distances change for each
    # capture, with a click-through to the generated transparent overlay image


if __name__ == '__main__':
    # Alibaba Cloud OSS / Facebody configuration
    access_key_id = 'LTAI5tSReWm8hz7dSYxxth8f'
    access_key_secret = '8ywTDF9upPAtvgXtLKALY2iMYHIxdS'
    facebody_endpoint = 'facebody.cn-shanghai.aliyuncs.com'
    endpoint = 'oss-cn-shanghai.aliyuncs.com'
    bucket_name = 'suwa3d-test'
    face_feature_points_count = 105
    bucket_client = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
    facebody_client = Client(Config(
        access_key_id=access_key_id,
        access_key_secret=access_key_secret,
        endpoint=facebody_endpoint,
        region_id='cn-shanghai'
    ))
    main()
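
# The two helpers below are sketches of the planned jitter analysis described in
# the notes at the end of main(); they are not wired into main() yet, and the
# function names, the default jitter_threshold and the record layout are
# assumptions made for illustration only.

def compute_jitter_record(pid, face_id, points1, points2, key_points, jitter_threshold=3.0):
    """For each key feature point, compute the Euclidean distance between its
    position in photo 1 and photo 2, then summarize into the record sketched in
    the comments above (max/min/avg distance and a 0/1 jitter flag)."""
    distances = {}
    for fp in key_points:
        dx = points1[fp]['x'] - points2[fp]['x']
        dy = points1[fp]['y'] - points2[fp]['y']
        distances[fp] = (dx ** 2 + dy ** 2) ** 0.5
    values = list(distances.values())
    avg_distance = sum(values) / len(values)
    return {
        'pid': pid,
        'face_id': face_id,
        'distances': distances,              # distance per key feature point
        'max_distance': max(values),
        'min_distance': min(values),
        'avg_distance': avg_distance,
        'jitter_threshold': jitter_threshold,
        'jitter_flag': 1 if avg_distance > jitter_threshold else 0,
    }


def make_overlay(photo1, photo2, out_path, alpha=0.5):
    """Blend the two annotated photos into one semi-transparent composite so the
    drift of the key points can be inspected visually; both images must have the
    same size and dtype for cv2.addWeighted."""
    overlay = cv2.addWeighted(photo1, alpha, photo2, 1 - alpha, 0)
    cv2.imwrite(out_path, overlay)
    return out_path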