我跟随本教程的图像对齐通过openCV.人脸检测没有任何作用,所以我自己添加了它。
import cv2
import numpy as np
import dlib
from PIL import Image
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
def extract_bounding_points(rect):
bl_corner = rect.bl_corner()
tr_corner = rect.tr_corner()
return [(bl_corner.x,bl_corner.y ), (tr_corner.x,tr_corner.y )]
def get_face(im):
hogFaceDetector = dlib.get_frontal_face_detector()
faceRect = hogFaceDetector(im, 0)[0]
face = im[faceRect.top() : faceRect.bottom(), faceRect.left() : faceRect.right()]
return face
def alignImages(im1, im2, resize=True):
# Convert images to grayscale
im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
faceImg1 = get_face(im1Gray)
faceImg2 = get_face(im2Gray)
coloredFace1 = get_face(im1)
coloredFace2 = get_face(im2)
if resize:
height, width = faceImg1.shape
faceImg1 = cv2.resize(faceImg1, (height * 2, width * 2))
coloredFace1 = cv2.resize(coloredFace1, (height * 2, width * 2))
height, width = faceImg2.shape
faceImg2 = cv2.resize(faceImg2, (height * 2, width * 2))
coloredFace2 = cv2.resize(coloredFace2, (height * 2, width * 2))
# Detect AKAZE features and compute descriptors.
akaze = cv2.AKAZE_create(MAX_FEATURES)
keypoints1, descriptors1 = akaze.detectAndCompute(faceImg1, None)
keypoints2, descriptors2 = akaze.detectAndCompute(faceImg2, None)
# Match features.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
matches = matcher.match(descriptors1, descriptors2, None)
# Sort matches by score
matches.sort(key=lambda x: x.distance, reverse=False)
# Remove not so good matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]
# Draw top matches
imMatches = cv2.drawMatches(coloredFace1, keypoints1, coloredFace2, keypoints2, matches, None)
cv2.imwrite("results/matches.jpg", imMatches)
# Extract location of good matches
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points1[i, :] = keypoints1[match.queryIdx].pt
points2[i, :] = keypoints2[match.trainIdx].pt
# Find homography
h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
# Use homography
height, width, channels = im2.shape
im1Reg = cv2.warpPerspective(im1, h, (width, height))
return im1Reg, h
# Read reference image
refFilename = "data/original.jpg"
print("Reading reference image : ", refFilename)
imReference = cv2.imread(refFilename, cv2.IMREAD_COLOR)
# Read image to be aligned
imFilename = "data/rotated.jpg"
print("Reading image to align : ", imFilename);
im = cv2.imread(imFilename, cv2.IMREAD_COLOR)
print("Aligning images ...")
# Registered image will be resotred in imReg.
# The estimated homography will be stored in h.
imReg, h = alignImages(im, imReference)
# Write aligned image to disk.
outFilename = "results/aligned.jpg"
print("Saving aligned image : ", outFilename);
cv2.imwrite(outFilename, imReg)
# Print estimated homography
print("Estimated homography : \n", h)
但是我不断地发现下一个错误
错误: /io/opencv/modules/features2d/src/kaze/AKAZEFeatures.cpp:1295:错误:(-215:断言失败) y0 -6* scale >= 0& y0 +6* scale < Lx.rows in function 'Sample_Derivative_Response_Radius6‘
执行keypoints1, descriptors1 = akaze.detectAndCompute(faceImg1, None)
时
我认为这与图像的大小有关,所以我添加了resize
的部分
if resize:
height, width = faceImg1.shape
faceImg1 = cv2.resize(faceImg1, (height * 2, width * 2))
coloredFace1 = cv2.resize(coloredFace1, (height * 2, width * 2))
height, width = faceImg2.shape
faceImg2 = cv2.resize(faceImg2, (height * 2, width * 2))
coloredFace2 = cv2.resize(coloredFace2, (height * 2, width * 2))
我还检查了脸部图像的大小是否相等,图像是否被正确加载。那我该怎么解决呢?
发布于 2021-05-20 13:55:37
MAX_FEATURES
参数的AKAZE_create
不是有效的参数。
请参阅创建文档
retval = cv.AKAZE_create( [,descriptor_type[,descriptor_size[,descriptor_channels[,阈值[,nOctaves[,nOctaveLayers,扩散率])
将akaze = cv2.AKAZE_create(MAX_FEATURES)
替换为:
akaze = cv2.AKAZE_create()
注意:
我找不到办法限制AKAZE的关键点数量。
我认为没有办法限制AKAZE的关键点数量(例如,反对SIFT )。
https://stackoverflow.com/questions/67616696
复制相似问题